1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_fb_helper.h>
82 #include <drm/drm_fourcc.h>
83 #include <drm/drm_edid.h>
84 #include <drm/drm_vblank.h>
85 #include <drm/drm_audio_component.h>
86 
87 #if defined(CONFIG_DRM_AMD_DC_DCN)
88 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
89 
90 #include "dcn/dcn_1_0_offset.h"
91 #include "dcn/dcn_1_0_sh_mask.h"
92 #include "soc15_hw_ip.h"
93 #include "vega10_ip_offset.h"
94 
95 #include "soc15_common.h"
96 #endif
97 
98 #include "modules/inc/mod_freesync.h"
99 #include "modules/power/power_helpers.h"
100 #include "modules/inc/mod_info_packet.h"
101 
102 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
104 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
106 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
108 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
110 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
112 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
114 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
116 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
117 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
118 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
119 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
120 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
121 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
122 
123 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
124 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
125 
126 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
127 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
128 
129 /* Number of bytes in PSP header for firmware. */
130 #define PSP_HEADER_BYTES 0x100
131 
132 /* Number of bytes in PSP footer for firmware. */
133 #define PSP_FOOTER_BYTES 0x100
134 
135 /**
136  * DOC: overview
137  *
138  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
139  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
140  * requests into DC requests, and DC responses into DRM responses.
141  *
142  * The root control structure is &struct amdgpu_display_manager.
143  */
144 
145 /* basic init/fini API */
146 static int amdgpu_dm_init(struct amdgpu_device *adev);
147 static void amdgpu_dm_fini(struct amdgpu_device *adev);
148 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
149 
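/*
 * Map the dongle type reported in the link's DPCD caps to the corresponding
 * DRM DP subconnector type.
 */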
150 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
151 {
152 	switch (link->dpcd_caps.dongle_type) {
153 	case DISPLAY_DONGLE_NONE:
154 		return DRM_MODE_SUBCONNECTOR_Native;
155 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
156 		return DRM_MODE_SUBCONNECTOR_VGA;
157 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
158 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
159 		return DRM_MODE_SUBCONNECTOR_DVID;
160 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
161 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
162 		return DRM_MODE_SUBCONNECTOR_HDMIA;
163 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
164 	default:
165 		return DRM_MODE_SUBCONNECTOR_Unknown;
166 	}
167 }
168 
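/*
 * Update the DP subconnector property of a DisplayPort connector based on
 * the currently attached sink (or dongle).
 */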
169 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
170 {
171 	struct dc_link *link = aconnector->dc_link;
172 	struct drm_connector *connector = &aconnector->base;
173 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
174 
175 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
176 		return;
177 
178 	if (aconnector->dc_sink)
179 		subconnector = get_subconnector_type(link);
180 
181 	drm_object_property_set_value(&connector->base,
182 			connector->dev->mode_config.dp_subconnector_property,
183 			subconnector);
184 }
185 
186 /*
187  * initializes drm_device display related structures, based on the information
188  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
189  * drm_encoder, drm_mode_config
190  *
191  * Returns 0 on success
192  */
193 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
194 /* removes and deallocates the drm structures, created by the above function */
195 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
196 
197 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
198 				struct drm_plane *plane,
199 				unsigned long possible_crtcs,
200 				const struct dc_plane_cap *plane_cap);
201 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
202 			       struct drm_plane *plane,
203 			       uint32_t link_index);
204 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
205 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
206 				    uint32_t link_index,
207 				    struct amdgpu_encoder *amdgpu_encoder);
208 static int amdgpu_dm_encoder_init(struct drm_device *dev,
209 				  struct amdgpu_encoder *aencoder,
210 				  uint32_t link_index);
211 
212 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
213 
214 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
215 
216 static int amdgpu_dm_atomic_check(struct drm_device *dev,
217 				  struct drm_atomic_state *state);
218 
219 static void handle_cursor_update(struct drm_plane *plane,
220 				 struct drm_plane_state *old_plane_state);
221 
222 static const struct drm_format_info *
223 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
224 
225 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
226 static void handle_hpd_rx_irq(void *param);
227 
228 static bool
229 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
230 				 struct drm_crtc_state *new_crtc_state);
231 /*
232  * dm_vblank_get_counter
233  *
234  * @brief
235  * Get counter for number of vertical blanks
236  *
237  * @param
238  * struct amdgpu_device *adev - [in] desired amdgpu device
239  * int crtc - [in] which CRTC to get the counter from
240  *
241  * @return
242  * Counter for vertical blanks
243  */
244 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
245 {
246 	if (crtc >= adev->mode_info.num_crtc)
247 		return 0;
248 	else {
249 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
250 
251 		if (acrtc->dm_irq_params.stream == NULL) {
252 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
253 				  crtc);
254 			return 0;
255 		}
256 
257 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
258 	}
259 }
260 
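/*
 * Query DC for the current scanout position of @crtc and pack the result
 * into the register-style format expected by the base driver.
 */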
261 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
262 				  u32 *vbl, u32 *position)
263 {
264 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
265 
266 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
267 		return -EINVAL;
268 	else {
269 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
270 
271 		if (acrtc->dm_irq_params.stream ==  NULL) {
272 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
273 				  crtc);
274 			return 0;
275 		}
276 
277 		/*
278 		 * TODO rework base driver to use values directly.
279 		 * for now parse it back into reg-format
280 		 */
281 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
282 					 &v_blank_start,
283 					 &v_blank_end,
284 					 &h_position,
285 					 &v_position);
286 
287 		*position = v_position | (h_position << 16);
288 		*vbl = v_blank_start | (v_blank_end << 16);
289 	}
290 
291 	return 0;
292 }
293 
294 static bool dm_is_idle(void *handle)
295 {
296 	/* XXX todo */
297 	return true;
298 }
299 
300 static int dm_wait_for_idle(void *handle)
301 {
302 	/* XXX todo */
303 	return 0;
304 }
305 
306 static bool dm_check_soft_reset(void *handle)
307 {
308 	return false;
309 }
310 
311 static int dm_soft_reset(void *handle)
312 {
313 	/* XXX todo */
314 	return 0;
315 }
316 
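/*
 * Find the amdgpu_crtc whose OTG (output timing generator) instance matches
 * @otg_inst. Falls back to the first CRTC if the instance is invalid.
 */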
317 static struct amdgpu_crtc *
318 get_crtc_by_otg_inst(struct amdgpu_device *adev,
319 		     int otg_inst)
320 {
321 	struct drm_device *dev = adev_to_drm(adev);
322 	struct drm_crtc *crtc;
323 	struct amdgpu_crtc *amdgpu_crtc;
324 
325 	if (WARN_ON(otg_inst == -1))
326 		return adev->mode_info.crtcs[0];
327 
328 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
329 		amdgpu_crtc = to_amdgpu_crtc(crtc);
330 
331 		if (amdgpu_crtc->otg_inst == otg_inst)
332 			return amdgpu_crtc;
333 	}
334 
335 	return NULL;
336 }
337 
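/*
 * Check whether variable refresh rate is active, using the freesync state
 * cached in dm_irq_params for use from interrupt handlers.
 */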
338 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
339 {
340 	return acrtc->dm_irq_params.freesync_config.state ==
341 		       VRR_STATE_ACTIVE_VARIABLE ||
342 	       acrtc->dm_irq_params.freesync_config.state ==
343 		       VRR_STATE_ACTIVE_FIXED;
344 }
345 
346 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
347 {
348 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
349 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
350 }
351 
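/*
 * Determine whether DC needs a vmin/vmax timing adjustment for the
 * transition between the old and new CRTC states.
 */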
352 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
353 					      struct dm_crtc_state *new_state)
354 {
355 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
356 		return true;
357 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
358 		return true;
359 	else
360 		return false;
361 }
362 
363 /**
364  * dm_pflip_high_irq() - Handle pageflip interrupt
365  * @interrupt_params: interrupt parameters identifying the amdgpu device and IRQ source
366  *
367  * Handles the pageflip interrupt by notifying all interested parties
368  * that the pageflip has been completed.
369  */
370 static void dm_pflip_high_irq(void *interrupt_params)
371 {
372 	struct amdgpu_crtc *amdgpu_crtc;
373 	struct common_irq_params *irq_params = interrupt_params;
374 	struct amdgpu_device *adev = irq_params->adev;
375 	unsigned long flags;
376 	struct drm_pending_vblank_event *e;
377 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
378 	bool vrr_active;
379 
380 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
381 
382 	/* IRQ could occur when in initial stage */
383 	/* TODO work and BO cleanup */
384 	if (amdgpu_crtc == NULL) {
385 		DC_LOG_PFLIP("CRTC is null, returning.\n");
386 		return;
387 	}
388 
389 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
390 
391 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
392 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
393 						 amdgpu_crtc->pflip_status,
394 						 AMDGPU_FLIP_SUBMITTED,
395 						 amdgpu_crtc->crtc_id,
396 						 amdgpu_crtc);
397 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
398 		return;
399 	}
400 
401 	/* page flip completed. */
402 	e = amdgpu_crtc->event;
403 	amdgpu_crtc->event = NULL;
404 
405 	WARN_ON(!e);
406 
407 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
408 
409 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
410 	if (!vrr_active ||
411 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
412 				      &v_blank_end, &hpos, &vpos) ||
413 	    (vpos < v_blank_start)) {
414 		/* Update to correct count and vblank timestamp if racing with
415 		 * vblank irq. This also updates to the correct vblank timestamp
416 		 * even in VRR mode, as scanout is past the front-porch atm.
417 		 */
418 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
419 
420 		/* Wake up userspace by sending the pageflip event with proper
421 		 * count and timestamp of vblank of flip completion.
422 		 */
423 		if (e) {
424 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
425 
426 			/* Event sent, so done with vblank for this flip */
427 			drm_crtc_vblank_put(&amdgpu_crtc->base);
428 		}
429 	} else if (e) {
430 		/* VRR active and inside front-porch: vblank count and
431 		 * timestamp for pageflip event will only be up to date after
432 		 * drm_crtc_handle_vblank() has been executed from late vblank
433 		 * irq handler after start of back-porch (vline 0). We queue the
434 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
435 		 * updated timestamp and count, once it runs after us.
436 		 *
437 		 * We need to open-code this instead of using the helper
438 		 * drm_crtc_arm_vblank_event(), as that helper would
439 		 * call drm_crtc_accurate_vblank_count(), which we must
440 		 * not call in VRR mode while we are in front-porch!
441 		 */
442 
443 		/* sequence will be replaced by real count during send-out. */
444 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
445 		e->pipe = amdgpu_crtc->crtc_id;
446 
447 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
448 		e = NULL;
449 	}
450 
451 	/* Keep track of vblank of this flip for flip throttling. We use the
452 	 * cooked hw counter, as that one is incremented at the start of the
453 	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
454 	 * count for queueing new pageflips if vsync + VRR is enabled.
455 	 */
456 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
457 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
458 
459 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
460 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
461 
462 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
463 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
464 		     vrr_active, (int) !e);
465 }
466 
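/**
 * dm_vupdate_high_irq() - Handles the VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Tracks the measured refresh rate and, when VRR is active, performs core
 * vblank handling and below-the-range (BTR) processing for pre-DCE12 ASICs
 * after the end of the front-porch.
 */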
467 static void dm_vupdate_high_irq(void *interrupt_params)
468 {
469 	struct common_irq_params *irq_params = interrupt_params;
470 	struct amdgpu_device *adev = irq_params->adev;
471 	struct amdgpu_crtc *acrtc;
472 	struct drm_device *drm_dev;
473 	struct drm_vblank_crtc *vblank;
474 	ktime_t frame_duration_ns, previous_timestamp;
475 	unsigned long flags;
476 	int vrr_active;
477 
478 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
479 
480 	if (acrtc) {
481 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
482 		drm_dev = acrtc->base.dev;
483 		vblank = &drm_dev->vblank[acrtc->base.index];
484 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
485 		frame_duration_ns = vblank->time - previous_timestamp;
486 
487 		if (frame_duration_ns > 0) {
488 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
489 						frame_duration_ns,
490 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
491 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
492 		}
493 
494 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
495 			      acrtc->crtc_id,
496 			      vrr_active);
497 
498 		/* Core vblank handling is done here after end of front-porch in
499 		 * vrr mode, as vblank timestamping only gives valid results
500 		 * once scanout is past the front-porch. This will also deliver
501 		 * page-flip completion events that have been queued to us
502 		 * if a pageflip happened inside front-porch.
503 		 */
504 		if (vrr_active) {
505 			drm_crtc_handle_vblank(&acrtc->base);
506 
507 			/* BTR processing for pre-DCE12 ASICs */
508 			if (acrtc->dm_irq_params.stream &&
509 			    adev->family < AMDGPU_FAMILY_AI) {
510 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
511 				mod_freesync_handle_v_update(
512 				    adev->dm.freesync_module,
513 				    acrtc->dm_irq_params.stream,
514 				    &acrtc->dm_irq_params.vrr_params);
515 
516 				dc_stream_adjust_vmin_vmax(
517 				    adev->dm.dc,
518 				    acrtc->dm_irq_params.stream,
519 				    &acrtc->dm_irq_params.vrr_params.adjust);
520 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
521 			}
522 		}
523 	}
524 }
525 
526 /**
527  * dm_crtc_high_irq() - Handles CRTC interrupt
528  * @interrupt_params: used for determining the CRTC instance
529  *
530  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
531  * event handler.
532  */
533 static void dm_crtc_high_irq(void *interrupt_params)
534 {
535 	struct common_irq_params *irq_params = interrupt_params;
536 	struct amdgpu_device *adev = irq_params->adev;
537 	struct amdgpu_crtc *acrtc;
538 	unsigned long flags;
539 	int vrr_active;
540 
541 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
542 	if (!acrtc)
543 		return;
544 
545 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
546 
547 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
548 		      vrr_active, acrtc->dm_irq_params.active_planes);
549 
550 	/*
551 	 * Core vblank handling at start of front-porch is only possible
552 	 * in non-vrr mode, as only then does vblank timestamping give
553 	 * valid results while done in front-porch. Otherwise defer it
554 	 * to dm_vupdate_high_irq after end of front-porch.
555 	 */
556 	if (!vrr_active)
557 		drm_crtc_handle_vblank(&acrtc->base);
558 
559 	/*
560 	 * The following must happen at start of vblank, for crc
561 	 * computation and below-the-range btr support in vrr mode.
562 	 */
563 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
564 
565 	/* BTR updates need to happen before VUPDATE on Vega and above. */
566 	if (adev->family < AMDGPU_FAMILY_AI)
567 		return;
568 
569 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
570 
571 	if (acrtc->dm_irq_params.stream &&
572 	    acrtc->dm_irq_params.vrr_params.supported &&
573 	    acrtc->dm_irq_params.freesync_config.state ==
574 		    VRR_STATE_ACTIVE_VARIABLE) {
575 		mod_freesync_handle_v_update(adev->dm.freesync_module,
576 					     acrtc->dm_irq_params.stream,
577 					     &acrtc->dm_irq_params.vrr_params);
578 
579 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
580 					   &acrtc->dm_irq_params.vrr_params.adjust);
581 	}
582 
583 	/*
584 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
585 	 * In that case, pageflip completion interrupts won't fire and pageflip
586 	 * completion events won't get delivered. Prevent this by sending
587 	 * pending pageflip events from here if a flip is still pending.
588 	 *
589 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
590 	 * avoid race conditions between flip programming and completion,
591 	 * which could cause too early flip completion events.
592 	 */
593 	if (adev->family >= AMDGPU_FAMILY_RV &&
594 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
595 	    acrtc->dm_irq_params.active_planes == 0) {
596 		if (acrtc->event) {
597 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
598 			acrtc->event = NULL;
599 			drm_crtc_vblank_put(&acrtc->base);
600 		}
601 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
602 	}
603 
604 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
605 }
606 
607 #if defined(CONFIG_DRM_AMD_DC_DCN)
608 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
609 /**
610  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
611  * DCN generation ASICs
612  * @interrupt_params: interrupt parameters
613  *
614  * Used to set crc window/read out crc value at vertical line 0 position
615  */
616 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
617 {
618 	struct common_irq_params *irq_params = interrupt_params;
619 	struct amdgpu_device *adev = irq_params->adev;
620 	struct amdgpu_crtc *acrtc;
621 
622 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
623 
624 	if (!acrtc)
625 		return;
626 
627 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
628 }
629 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
630 
631 /**
632  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
633  * @adev: amdgpu_device pointer
634  * @notify: dmub notification structure
635  *
636  * Dmub AUX or SET_CONFIG command completion processing callback.
637  * Copies the dmub notification to DM, to be read by the AUX command
638  * issuing thread, and signals the event to wake up that thread.
639  */
640 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
641 					struct dmub_notification *notify)
642 {
643 	if (adev->dm.dmub_notify)
644 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
645 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
646 		complete(&adev->dm.dmub_aux_transfer_done);
647 }
648 
649 /**
650  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
651  * @adev: amdgpu_device pointer
652  * @notify: dmub notification structure
653  *
654  * Dmub Hpd interrupt processing callback. Gets the display index through
655  * the link index and calls the helper to do the processing.
656  */
657 static void dmub_hpd_callback(struct amdgpu_device *adev,
658 			      struct dmub_notification *notify)
659 {
660 	struct amdgpu_dm_connector *aconnector;
661 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
662 	struct drm_connector *connector;
663 	struct drm_connector_list_iter iter;
664 	struct dc_link *link;
665 	uint8_t link_index = 0;
666 	struct drm_device *dev;
667 
668 	if (adev == NULL)
669 		return;
670 
671 	if (notify == NULL) {
672 		DRM_ERROR("DMUB HPD callback notification was NULL");
673 		return;
674 	}
675 
676 	if (notify->link_index >= adev->dm.dc->link_count) {
677 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
678 		return;
679 	}
680 
681 	link_index = notify->link_index;
682 	link = adev->dm.dc->links[link_index];
683 	dev = adev->dm.ddev;
684 
685 	drm_connector_list_iter_begin(dev, &iter);
686 	drm_for_each_connector_iter(connector, &iter) {
687 		aconnector = to_amdgpu_dm_connector(connector);
688 		if (link && aconnector->dc_link == link) {
689 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
690 			hpd_aconnector = aconnector;
691 			break;
692 		}
693 	}
694 	drm_connector_list_iter_end(&iter);
695 
696 	if (hpd_aconnector) {
697 		if (notify->type == DMUB_NOTIFICATION_HPD)
698 			handle_hpd_irq_helper(hpd_aconnector);
699 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
700 			handle_hpd_rx_irq(hpd_aconnector);
701 	}
702 }
703 
704 /**
705  * register_dmub_notify_callback - Sets callback for DMUB notify
706  * @adev: amdgpu_device pointer
707  * @type: Type of dmub notification
708  * @callback: Dmub interrupt callback function
709  * @dmub_int_thread_offload: offload indicator
710  *
711  * API to register a dmub callback handler for a dmub notification.
712  * Also sets an indicator of whether callback processing is to be
713  * offloaded to the dmub interrupt handling thread.
714  * Return: true if successfully registered, false if the callback is NULL or the type is invalid
715  */
716 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
717 					  enum dmub_notification_type type,
718 					  dmub_notify_interrupt_callback_t callback,
719 					  bool dmub_int_thread_offload)
720 {
721 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
722 		adev->dm.dmub_callback[type] = callback;
723 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
724 	} else
725 		return false;
726 
727 	return true;
728 }
729 
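/*
 * Worker that runs a DMUB notification callback which was offloaded from
 * the outbox interrupt handler.
 */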
730 static void dm_handle_hpd_work(struct work_struct *work)
731 {
732 	struct dmub_hpd_work *dmub_hpd_wrk;
733 
734 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
735 
736 	if (!dmub_hpd_wrk->dmub_notify) {
737 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
738 		return;
739 	}
740 
741 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
742 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
743 		dmub_hpd_wrk->dmub_notify);
744 	}
745 
746 	kfree(dmub_hpd_wrk->dmub_notify);
747 	kfree(dmub_hpd_wrk);
748 
749 }
750 
751 #define DMUB_TRACE_MAX_READ 64
752 /**
753  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
754  * @interrupt_params: used for determining the Outbox instance
755  *
756  * Handles the Outbox interrupt by processing pending DMUB notifications
757  * and reading entries from the DMUB trace buffer.
758  */
759 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
760 {
761 	struct dmub_notification notify;
762 	struct common_irq_params *irq_params = interrupt_params;
763 	struct amdgpu_device *adev = irq_params->adev;
764 	struct amdgpu_display_manager *dm = &adev->dm;
765 	struct dmcub_trace_buf_entry entry = { 0 };
766 	uint32_t count = 0;
767 	struct dmub_hpd_work *dmub_hpd_wrk;
768 	struct dc_link *plink = NULL;
769 
770 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
771 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
772 
773 		do {
774 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
775 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
776 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
777 				continue;
778 			}
779 			if (!dm->dmub_callback[notify.type]) {
780 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
781 				continue;
782 			}
783 			if (dm->dmub_thread_offload[notify.type]) {
784 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
785 				if (!dmub_hpd_wrk) {
786 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
787 					return;
788 				}
789 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
790 				if (!dmub_hpd_wrk->dmub_notify) {
791 					kfree(dmub_hpd_wrk);
792 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
793 					return;
794 				}
795 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
796 				if (dmub_hpd_wrk->dmub_notify)
797 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
798 				dmub_hpd_wrk->adev = adev;
799 				if (notify.type == DMUB_NOTIFICATION_HPD) {
800 					plink = adev->dm.dc->links[notify.link_index];
801 					if (plink) {
802 						plink->hpd_status =
803 							notify.hpd_status == DP_HPD_PLUG;
804 					}
805 				}
806 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
807 			} else {
808 				dm->dmub_callback[notify.type](adev, &notify);
809 			}
810 		} while (notify.pending_notification);
811 	}
812 
813 
814 	do {
815 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
816 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
817 							entry.param0, entry.param1);
818 
819 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
820 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
821 		} else
822 			break;
823 
824 		count++;
825 
826 	} while (count <= DMUB_TRACE_MAX_READ);
827 
828 	if (count > DMUB_TRACE_MAX_READ)
829 		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
830 }
831 #endif /* CONFIG_DRM_AMD_DC_DCN */
832 
833 static int dm_set_clockgating_state(void *handle,
834 		  enum amd_clockgating_state state)
835 {
836 	return 0;
837 }
838 
839 static int dm_set_powergating_state(void *handle,
840 		  enum amd_powergating_state state)
841 {
842 	return 0;
843 }
844 
845 /* Prototypes of private functions */
846 static int dm_early_init(void *handle);
847 
848 /* Allocate memory for FBC compressed data  */
849 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
850 {
851 	struct drm_device *dev = connector->dev;
852 	struct amdgpu_device *adev = drm_to_adev(dev);
853 	struct dm_compressor_info *compressor = &adev->dm.compressor;
854 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
855 	struct drm_display_mode *mode;
856 	unsigned long max_size = 0;
857 
858 	if (adev->dm.dc->fbc_compressor == NULL)
859 		return;
860 
861 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
862 		return;
863 
864 	if (compressor->bo_ptr)
865 		return;
866 
867 
868 	list_for_each_entry(mode, &connector->modes, head) {
869 		if (max_size < mode->htotal * mode->vtotal)
870 			max_size = mode->htotal * mode->vtotal;
871 	}
872 
873 	if (max_size) {
874 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
875 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
876 			    &compressor->gpu_addr, &compressor->cpu_addr);
877 
878 		if (r)
879 			DRM_ERROR("DM: Failed to initialize FBC\n");
880 		else {
881 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
882 			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
883 		}
884 
885 	}
886 
887 }
888 
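/*
 * Audio component callback: look up the connector driving the given audio
 * pin and copy its ELD (EDID-Like Data) into @buf.
 */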
889 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
890 					  int pipe, bool *enabled,
891 					  unsigned char *buf, int max_bytes)
892 {
893 	struct drm_device *dev = dev_get_drvdata(kdev);
894 	struct amdgpu_device *adev = drm_to_adev(dev);
895 	struct drm_connector *connector;
896 	struct drm_connector_list_iter conn_iter;
897 	struct amdgpu_dm_connector *aconnector;
898 	int ret = 0;
899 
900 	*enabled = false;
901 
902 	mutex_lock(&adev->dm.audio_lock);
903 
904 	drm_connector_list_iter_begin(dev, &conn_iter);
905 	drm_for_each_connector_iter(connector, &conn_iter) {
906 		aconnector = to_amdgpu_dm_connector(connector);
907 		if (aconnector->audio_inst != port)
908 			continue;
909 
910 		*enabled = true;
911 		ret = drm_eld_size(connector->eld);
912 		memcpy(buf, connector->eld, min(max_bytes, ret));
913 
914 		break;
915 	}
916 	drm_connector_list_iter_end(&conn_iter);
917 
918 	mutex_unlock(&adev->dm.audio_lock);
919 
920 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
921 
922 	return ret;
923 }
924 
925 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
926 	.get_eld = amdgpu_dm_audio_component_get_eld,
927 };
928 
929 static int amdgpu_dm_audio_component_bind(struct device *kdev,
930 				       struct device *hda_kdev, void *data)
931 {
932 	struct drm_device *dev = dev_get_drvdata(kdev);
933 	struct amdgpu_device *adev = drm_to_adev(dev);
934 	struct drm_audio_component *acomp = data;
935 
936 	acomp->ops = &amdgpu_dm_audio_component_ops;
937 	acomp->dev = kdev;
938 	adev->dm.audio_component = acomp;
939 
940 	return 0;
941 }
942 
943 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
944 					  struct device *hda_kdev, void *data)
945 {
946 	struct drm_device *dev = dev_get_drvdata(kdev);
947 	struct amdgpu_device *adev = drm_to_adev(dev);
948 	struct drm_audio_component *acomp = data;
949 
950 	acomp->ops = NULL;
951 	acomp->dev = NULL;
952 	adev->dm.audio_component = NULL;
953 }
954 
955 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
956 	.bind	= amdgpu_dm_audio_component_bind,
957 	.unbind	= amdgpu_dm_audio_component_unbind,
958 };
959 
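/* Initialize audio pin state from DC and register the audio component. */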
960 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
961 {
962 	int i, ret;
963 
964 	if (!amdgpu_audio)
965 		return 0;
966 
967 	adev->mode_info.audio.enabled = true;
968 
969 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
970 
971 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
972 		adev->mode_info.audio.pin[i].channels = -1;
973 		adev->mode_info.audio.pin[i].rate = -1;
974 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
975 		adev->mode_info.audio.pin[i].status_bits = 0;
976 		adev->mode_info.audio.pin[i].category_code = 0;
977 		adev->mode_info.audio.pin[i].connected = false;
978 		adev->mode_info.audio.pin[i].id =
979 			adev->dm.dc->res_pool->audios[i]->inst;
980 		adev->mode_info.audio.pin[i].offset = 0;
981 	}
982 
983 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
984 	if (ret < 0)
985 		return ret;
986 
987 	adev->dm.audio_registered = true;
988 
989 	return 0;
990 }
991 
992 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
993 {
994 	if (!amdgpu_audio)
995 		return;
996 
997 	if (!adev->mode_info.audio.enabled)
998 		return;
999 
1000 	if (adev->dm.audio_registered) {
1001 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1002 		adev->dm.audio_registered = false;
1003 	}
1004 
1005 	/* TODO: Disable audio? */
1006 
1007 	adev->mode_info.audio.enabled = false;
1008 }
1009 
1010 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1011 {
1012 	struct drm_audio_component *acomp = adev->dm.audio_component;
1013 
1014 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1015 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1016 
1017 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1018 						 pin, -1);
1019 	}
1020 }
1021 
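/*
 * Copy the DMUB firmware and VBIOS into their framebuffer regions and bring
 * up the DMCUB hardware. Returns 0 on success (including when DMUB is not
 * supported on the ASIC) or a negative error code.
 */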
1022 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1023 {
1024 	const struct dmcub_firmware_header_v1_0 *hdr;
1025 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1026 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1027 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1028 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1029 	struct abm *abm = adev->dm.dc->res_pool->abm;
1030 	struct dmub_srv_hw_params hw_params;
1031 	enum dmub_status status;
1032 	const unsigned char *fw_inst_const, *fw_bss_data;
1033 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1034 	bool has_hw_support;
1035 
1036 	if (!dmub_srv)
1037 		/* DMUB isn't supported on the ASIC. */
1038 		return 0;
1039 
1040 	if (!fb_info) {
1041 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1042 		return -EINVAL;
1043 	}
1044 
1045 	if (!dmub_fw) {
1046 		/* Firmware required for DMUB support. */
1047 		DRM_ERROR("No firmware provided for DMUB.\n");
1048 		return -EINVAL;
1049 	}
1050 
1051 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1052 	if (status != DMUB_STATUS_OK) {
1053 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1054 		return -EINVAL;
1055 	}
1056 
1057 	if (!has_hw_support) {
1058 		DRM_INFO("DMUB unsupported on ASIC\n");
1059 		return 0;
1060 	}
1061 
1062 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1063 	status = dmub_srv_hw_reset(dmub_srv);
1064 	if (status != DMUB_STATUS_OK)
1065 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1066 
1067 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1068 
1069 	fw_inst_const = dmub_fw->data +
1070 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1071 			PSP_HEADER_BYTES;
1072 
1073 	fw_bss_data = dmub_fw->data +
1074 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1075 		      le32_to_cpu(hdr->inst_const_bytes);
1076 
1077 	/* Copy firmware and bios info into FB memory. */
1078 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1079 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1080 
1081 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1082 
1083 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1084 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1085 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1086 	 * will be done by dm_dmub_hw_init
1087 	 */
1088 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1089 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1090 				fw_inst_const_size);
1091 	}
1092 
1093 	if (fw_bss_data_size)
1094 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1095 		       fw_bss_data, fw_bss_data_size);
1096 
1097 	/* Copy firmware bios info into FB memory. */
1098 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1099 	       adev->bios_size);
1100 
1101 	/* Reset regions that need to be reset. */
1102 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1103 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1104 
1105 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1106 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1107 
1108 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1109 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1110 
1111 	/* Initialize hardware. */
1112 	memset(&hw_params, 0, sizeof(hw_params));
1113 	hw_params.fb_base = adev->gmc.fb_start;
1114 	hw_params.fb_offset = adev->gmc.aper_base;
1115 
1116 	/* backdoor load firmware and trigger dmub running */
1117 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1118 		hw_params.load_inst_const = true;
1119 
1120 	if (dmcu)
1121 		hw_params.psp_version = dmcu->psp_version;
1122 
1123 	for (i = 0; i < fb_info->num_fb; ++i)
1124 		hw_params.fb[i] = &fb_info->fb[i];
1125 
1126 	switch (adev->ip_versions[DCE_HWIP][0]) {
1127 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1128 		hw_params.dpia_supported = true;
1129 #if defined(CONFIG_DRM_AMD_DC_DCN)
1130 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1131 #endif
1132 		break;
1133 	default:
1134 		break;
1135 	}
1136 
1137 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1138 	if (status != DMUB_STATUS_OK) {
1139 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1140 		return -EINVAL;
1141 	}
1142 
1143 	/* Wait for firmware load to finish. */
1144 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1145 	if (status != DMUB_STATUS_OK)
1146 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1147 
1148 	/* Init DMCU and ABM if available. */
1149 	if (dmcu && abm) {
1150 		dmcu->funcs->dmcu_init(dmcu);
1151 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1152 	}
1153 
1154 	if (!adev->dm.dc->ctx->dmub_srv)
1155 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1156 	if (!adev->dm.dc->ctx->dmub_srv) {
1157 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1158 		return -ENOMEM;
1159 	}
1160 
1161 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1162 		 adev->dm.dmcub_fw_version);
1163 
1164 	return 0;
1165 }
1166 
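/*
 * Re-initialize DMUB on resume: if the firmware is already running, just
 * wait for it to auto-load; otherwise perform a full hardware init.
 */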
1167 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1168 {
1169 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1170 	enum dmub_status status;
1171 	bool init;
1172 
1173 	if (!dmub_srv) {
1174 		/* DMUB isn't supported on the ASIC. */
1175 		return;
1176 	}
1177 
1178 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1179 	if (status != DMUB_STATUS_OK)
1180 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1181 
1182 	if (status == DMUB_STATUS_OK && init) {
1183 		/* Wait for firmware load to finish. */
1184 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1185 		if (status != DMUB_STATUS_OK)
1186 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1187 	} else {
1188 		/* Perform the full hardware initialization. */
1189 		dm_dmub_hw_init(adev);
1190 	}
1191 }
1192 
1193 #if defined(CONFIG_DRM_AMD_DC_DCN)
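/*
 * Fill @pa_config with the system aperture, AGP aperture and GART page
 * table layout from the GMC so DC can program DCN address translation.
 */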
1194 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1195 {
1196 	uint64_t pt_base;
1197 	uint32_t logical_addr_low;
1198 	uint32_t logical_addr_high;
1199 	uint32_t agp_base, agp_bot, agp_top;
1200 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1201 
1202 	memset(pa_config, 0, sizeof(*pa_config));
1203 
1204 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1205 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1206 
1207 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1208 		/*
1209 		 * Raven2 has a HW issue that makes it unable to use the vram which
1210 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround is to
1211 		 * increase the system aperture high address (add 1) to get rid of
1212 		 * the VM fault and hardware hang.
1213 		 */
1214 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1215 	else
1216 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1217 
1218 	agp_base = 0;
1219 	agp_bot = adev->gmc.agp_start >> 24;
1220 	agp_top = adev->gmc.agp_end >> 24;
1221 
1222 
1223 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1224 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1225 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1226 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1227 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1228 	page_table_base.low_part = lower_32_bits(pt_base);
1229 
1230 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1231 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1232 
1233 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1234 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1235 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1236 
1237 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1238 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1239 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1240 
1241 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1242 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1243 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1244 
1245 	pa_config->is_hvm_enabled = 0;
1246 
1247 }
1248 #endif
1249 #if defined(CONFIG_DRM_AMD_DC_DCN)
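/*
 * Deferred work that tracks how many CRTCs have vblank interrupts enabled,
 * toggles idle optimizations (MALL) accordingly and enables or disables PSR
 * based on the OS vblank requirements.
 */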
1250 static void vblank_control_worker(struct work_struct *work)
1251 {
1252 	struct vblank_control_work *vblank_work =
1253 		container_of(work, struct vblank_control_work, work);
1254 	struct amdgpu_display_manager *dm = vblank_work->dm;
1255 
1256 	mutex_lock(&dm->dc_lock);
1257 
1258 	if (vblank_work->enable)
1259 		dm->active_vblank_irq_count++;
1260 	else if (dm->active_vblank_irq_count)
1261 		dm->active_vblank_irq_count--;
1262 
1263 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1264 
1265 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1266 
1267 	/* Control PSR based on vblank requirements from OS */
1268 	if (vblank_work->stream && vblank_work->stream->link) {
1269 		if (vblank_work->enable) {
1270 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1271 				amdgpu_dm_psr_disable(vblank_work->stream);
1272 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1273 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1274 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1275 			amdgpu_dm_psr_enable(vblank_work->stream);
1276 		}
1277 	}
1278 
1279 	mutex_unlock(&dm->dc_lock);
1280 
1281 	dc_stream_release(vblank_work->stream);
1282 
1283 	kfree(vblank_work);
1284 }
1285 
1286 #endif
1287 
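/*
 * Deferred HPD RX IRQ work: re-detects the sink and handles automated test
 * or link loss requests that could not be serviced in the interrupt handler.
 */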
1288 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1289 {
1290 	struct hpd_rx_irq_offload_work *offload_work;
1291 	struct amdgpu_dm_connector *aconnector;
1292 	struct dc_link *dc_link;
1293 	struct amdgpu_device *adev;
1294 	enum dc_connection_type new_connection_type = dc_connection_none;
1295 	unsigned long flags;
1296 
1297 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1298 	aconnector = offload_work->offload_wq->aconnector;
1299 
1300 	if (!aconnector) {
1301 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1302 		goto skip;
1303 	}
1304 
1305 	adev = drm_to_adev(aconnector->base.dev);
1306 	dc_link = aconnector->dc_link;
1307 
1308 	mutex_lock(&aconnector->hpd_lock);
1309 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1310 		DRM_ERROR("KMS: Failed to detect connector\n");
1311 	mutex_unlock(&aconnector->hpd_lock);
1312 
1313 	if (new_connection_type == dc_connection_none)
1314 		goto skip;
1315 
1316 	if (amdgpu_in_reset(adev))
1317 		goto skip;
1318 
1319 	mutex_lock(&adev->dm.dc_lock);
1320 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1321 		dc_link_dp_handle_automated_test(dc_link);
1322 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1323 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1324 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1325 		dc_link_dp_handle_link_loss(dc_link);
1326 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1327 		offload_work->offload_wq->is_handling_link_loss = false;
1328 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1329 	}
1330 	mutex_unlock(&adev->dm.dc_lock);
1331 
1332 skip:
1333 	kfree(offload_work);
1334 
1335 }
1336 
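/*
 * Allocate one single-threaded HPD RX IRQ offload workqueue per link so
 * long-running HPD RX handling can be deferred out of interrupt context.
 */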
1337 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1338 {
1339 	int max_caps = dc->caps.max_links;
1340 	int i = 0;
1341 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1342 
1343 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1344 
1345 	if (!hpd_rx_offload_wq)
1346 		return NULL;
1347 
1348 
1349 	for (i = 0; i < max_caps; i++) {
1350 		hpd_rx_offload_wq[i].wq =
1351 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1352 
1353 		if (hpd_rx_offload_wq[i].wq == NULL) {
1354 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1355 			return NULL;
1356 		}
1357 
1358 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1359 	}
1360 
1361 	return hpd_rx_offload_wq;
1362 }
1363 
1364 struct amdgpu_stutter_quirk {
1365 	u16 chip_vendor;
1366 	u16 chip_device;
1367 	u16 subsys_vendor;
1368 	u16 subsys_device;
1369 	u8 revision;
1370 };
1371 
1372 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1373 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1374 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1375 	{ 0, 0, 0, 0, 0 },
1376 };
1377 
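/*
 * Check whether the device matches a quirk entry that requires stutter
 * mode to be disabled.
 */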
1378 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1379 {
1380 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1381 
1382 	while (p && p->chip_device != 0) {
1383 		if (pdev->vendor == p->chip_vendor &&
1384 		    pdev->device == p->chip_device &&
1385 		    pdev->subsystem_vendor == p->subsys_vendor &&
1386 		    pdev->subsystem_device == p->subsys_device &&
1387 		    pdev->revision == p->revision) {
1388 			return true;
1389 		}
1390 		++p;
1391 	}
1392 	return false;
1393 }
1394 
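/*
 * Initialize the display manager: create the DC core, bring up DMUB,
 * set up freesync, HDCP and DMUB notification handling, and register the
 * DRM display state.
 */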
1395 static int amdgpu_dm_init(struct amdgpu_device *adev)
1396 {
1397 	struct dc_init_data init_data;
1398 #ifdef CONFIG_DRM_AMD_DC_HDCP
1399 	struct dc_callback_init init_params;
1400 #endif
1401 	int r;
1402 
1403 	adev->dm.ddev = adev_to_drm(adev);
1404 	adev->dm.adev = adev;
1405 
1406 	/* Zero all the fields */
1407 	memset(&init_data, 0, sizeof(init_data));
1408 #ifdef CONFIG_DRM_AMD_DC_HDCP
1409 	memset(&init_params, 0, sizeof(init_params));
1410 #endif
1411 
1412 	mutex_init(&adev->dm.dc_lock);
1413 	mutex_init(&adev->dm.audio_lock);
1414 #if defined(CONFIG_DRM_AMD_DC_DCN)
1415 	spin_lock_init(&adev->dm.vblank_lock);
1416 #endif
1417 
1418 	if (amdgpu_dm_irq_init(adev)) {
1419 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1420 		goto error;
1421 	}
1422 
1423 	init_data.asic_id.chip_family = adev->family;
1424 
1425 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1426 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1427 	init_data.asic_id.chip_id = adev->pdev->device;
1428 
1429 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1430 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1431 	init_data.asic_id.atombios_base_address =
1432 		adev->mode_info.atom_context->bios;
1433 
1434 	init_data.driver = adev;
1435 
1436 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1437 
1438 	if (!adev->dm.cgs_device) {
1439 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1440 		goto error;
1441 	}
1442 
1443 	init_data.cgs_device = adev->dm.cgs_device;
1444 
1445 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1446 
1447 	switch (adev->ip_versions[DCE_HWIP][0]) {
1448 	case IP_VERSION(2, 1, 0):
1449 		switch (adev->dm.dmcub_fw_version) {
1450 		case 0: /* development */
1451 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1452 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1453 			init_data.flags.disable_dmcu = false;
1454 			break;
1455 		default:
1456 			init_data.flags.disable_dmcu = true;
1457 		}
1458 		break;
1459 	case IP_VERSION(2, 0, 3):
1460 		init_data.flags.disable_dmcu = true;
1461 		break;
1462 	default:
1463 		break;
1464 	}
1465 
1466 	switch (adev->asic_type) {
1467 	case CHIP_CARRIZO:
1468 	case CHIP_STONEY:
1469 		init_data.flags.gpu_vm_support = true;
1470 		break;
1471 	default:
1472 		switch (adev->ip_versions[DCE_HWIP][0]) {
1473 		case IP_VERSION(1, 0, 0):
1474 		case IP_VERSION(1, 0, 1):
1475 			/* enable S/G on PCO and RV2 */
1476 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1477 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1478 				init_data.flags.gpu_vm_support = true;
1479 			break;
1480 		case IP_VERSION(2, 1, 0):
1481 		case IP_VERSION(3, 0, 1):
1482 		case IP_VERSION(3, 1, 2):
1483 		case IP_VERSION(3, 1, 3):
1484 		case IP_VERSION(3, 1, 5):
1485 		case IP_VERSION(3, 1, 6):
1486 			init_data.flags.gpu_vm_support = true;
1487 			break;
1488 		default:
1489 			break;
1490 		}
1491 		break;
1492 	}
1493 
1494 	if (init_data.flags.gpu_vm_support)
1495 		adev->mode_info.gpu_vm_support = true;
1496 
1497 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1498 		init_data.flags.fbc_support = true;
1499 
1500 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1501 		init_data.flags.multi_mon_pp_mclk_switch = true;
1502 
1503 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1504 		init_data.flags.disable_fractional_pwm = true;
1505 
1506 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1507 		init_data.flags.edp_no_power_sequencing = true;
1508 
1509 #ifdef CONFIG_DRM_AMD_DC_DCN
1510 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1511 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1512 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1513 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1514 #endif
1515 
1516 	init_data.flags.seamless_boot_edp_requested = false;
1517 
1518 	if (check_seamless_boot_capability(adev)) {
1519 		init_data.flags.seamless_boot_edp_requested = true;
1520 		init_data.flags.allow_seamless_boot_optimization = true;
1521 		DRM_INFO("Seamless boot condition check passed\n");
1522 	}
1523 
1524 	INIT_LIST_HEAD(&adev->dm.da_list);
1525 	/* Display Core create. */
1526 	adev->dm.dc = dc_create(&init_data);
1527 
1528 	if (adev->dm.dc) {
1529 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1530 	} else {
1531 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1532 		goto error;
1533 	}
1534 
1535 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1536 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1537 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1538 	}
1539 
1540 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1541 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1542 	if (dm_should_disable_stutter(adev->pdev))
1543 		adev->dm.dc->debug.disable_stutter = true;
1544 
1545 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1546 		adev->dm.dc->debug.disable_stutter = true;
1547 
1548 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1549 		adev->dm.dc->debug.disable_dsc = true;
1550 		adev->dm.dc->debug.disable_dsc_edp = true;
1551 	}
1552 
1553 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1554 		adev->dm.dc->debug.disable_clock_gate = true;
1555 
1556 	r = dm_dmub_hw_init(adev);
1557 	if (r) {
1558 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1559 		goto error;
1560 	}
1561 
1562 	dc_hardware_init(adev->dm.dc);
1563 
1564 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1565 	if (!adev->dm.hpd_rx_offload_wq) {
1566 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1567 		goto error;
1568 	}
1569 
1570 #if defined(CONFIG_DRM_AMD_DC_DCN)
1571 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1572 		struct dc_phy_addr_space_config pa_config;
1573 
1574 		mmhub_read_system_context(adev, &pa_config);
1575 
1576 		// Call the DC init_memory func
1577 		dc_setup_system_context(adev->dm.dc, &pa_config);
1578 	}
1579 #endif
1580 
1581 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1582 	if (!adev->dm.freesync_module) {
1583 		DRM_ERROR(
1584 		"amdgpu: failed to initialize freesync_module.\n");
1585 	} else
1586 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1587 				adev->dm.freesync_module);
1588 
1589 	amdgpu_dm_init_color_mod();
1590 
1591 #if defined(CONFIG_DRM_AMD_DC_DCN)
1592 	if (adev->dm.dc->caps.max_links > 0) {
1593 		adev->dm.vblank_control_workqueue =
1594 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1595 		if (!adev->dm.vblank_control_workqueue)
1596 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1597 	}
1598 #endif
1599 
1600 #ifdef CONFIG_DRM_AMD_DC_HDCP
1601 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1602 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1603 
1604 		if (!adev->dm.hdcp_workqueue)
1605 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1606 		else
1607 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1608 
1609 		dc_init_callbacks(adev->dm.dc, &init_params);
1610 	}
1611 #endif
1612 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1613 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1614 #endif
1615 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1616 		init_completion(&adev->dm.dmub_aux_transfer_done);
1617 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1618 		if (!adev->dm.dmub_notify) {
1619 			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
1620 			goto error;
1621 		}
1622 
1623 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1624 		if (!adev->dm.delayed_hpd_wq) {
1625 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1626 			goto error;
1627 		}
1628 
1629 		amdgpu_dm_outbox_init(adev);
1630 #if defined(CONFIG_DRM_AMD_DC_DCN)
1631 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1632 			dmub_aux_setconfig_callback, false)) {
1633 			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1634 			goto error;
1635 		}
1636 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1637 			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1638 			goto error;
1639 		}
1640 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd_irq callback\n");
1642 			goto error;
1643 		}
1644 #endif /* CONFIG_DRM_AMD_DC_DCN */
1645 	}
1646 
1647 	if (amdgpu_dm_initialize_drm_device(adev)) {
1648 		DRM_ERROR(
1649 		"amdgpu: failed to initialize sw for display support.\n");
1650 		goto error;
1651 	}
1652 
1653 	/* create fake encoders for MST */
1654 	dm_dp_create_fake_mst_encoders(adev);
1655 
1656 	/* TODO: Add_display_info? */
1657 
1658 	/* TODO use dynamic cursor width */
1659 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1660 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1661 
1662 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank support.\n");
1665 		goto error;
1666 	}
1667 
1668 
1669 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1670 
1671 	return 0;
1672 error:
1673 	amdgpu_dm_fini(adev);
1674 
1675 	return -EINVAL;
1676 }
1677 
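/*
 * Early teardown hook: unregister the audio component here; the rest of the
 * display manager is torn down later in amdgpu_dm_fini().
 */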
1678 static int amdgpu_dm_early_fini(void *handle)
1679 {
1680 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1681 
1682 	amdgpu_dm_audio_fini(adev);
1683 
1684 	return 0;
1685 }
1686 
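/*
 * Full display manager teardown: destroy the DM workqueues and fake MST
 * encoders, release the DRM device state, free DMUB/DMCUB resources and
 * finally destroy the DC instance itself.
 */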
1687 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1688 {
1689 	int i;
1690 
1691 #if defined(CONFIG_DRM_AMD_DC_DCN)
1692 	if (adev->dm.vblank_control_workqueue) {
1693 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1694 		adev->dm.vblank_control_workqueue = NULL;
1695 	}
1696 #endif
1697 
1698 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1699 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1700 	}
1701 
1702 	amdgpu_dm_destroy_drm_device(&adev->dm);
1703 
1704 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1705 	if (adev->dm.crc_rd_wrk) {
1706 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1707 		kfree(adev->dm.crc_rd_wrk);
1708 		adev->dm.crc_rd_wrk = NULL;
1709 	}
1710 #endif
1711 #ifdef CONFIG_DRM_AMD_DC_HDCP
1712 	if (adev->dm.hdcp_workqueue) {
1713 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1714 		adev->dm.hdcp_workqueue = NULL;
1715 	}
1716 
1717 	if (adev->dm.dc)
1718 		dc_deinit_callbacks(adev->dm.dc);
1719 #endif
1720 
1721 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1722 
1723 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1724 		kfree(adev->dm.dmub_notify);
1725 		adev->dm.dmub_notify = NULL;
1726 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1727 		adev->dm.delayed_hpd_wq = NULL;
1728 	}
1729 
1730 	if (adev->dm.dmub_bo)
1731 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1732 				      &adev->dm.dmub_bo_gpu_addr,
1733 				      &adev->dm.dmub_bo_cpu_addr);
1734 
1735 	if (adev->dm.hpd_rx_offload_wq) {
1736 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1737 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1738 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1739 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1740 			}
1741 		}
1742 
1743 		kfree(adev->dm.hpd_rx_offload_wq);
1744 		adev->dm.hpd_rx_offload_wq = NULL;
1745 	}
1746 
1747 	/* DC Destroy TODO: Replace destroy DAL */
1748 	if (adev->dm.dc)
1749 		dc_destroy(&adev->dm.dc);
1750 	/*
	 * TODO: pageflip, vblank interrupt
1752 	 *
1753 	 * amdgpu_dm_irq_fini(adev);
1754 	 */
1755 
1756 	if (adev->dm.cgs_device) {
1757 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1758 		adev->dm.cgs_device = NULL;
1759 	}
1760 	if (adev->dm.freesync_module) {
1761 		mod_freesync_destroy(adev->dm.freesync_module);
1762 		adev->dm.freesync_module = NULL;
1763 	}
1764 
1765 	mutex_destroy(&adev->dm.audio_lock);
1766 	mutex_destroy(&adev->dm.dc_lock);
1769 }
1770 
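/*
 * Request and validate the DMCU firmware where one is required (Raven/Picasso
 * and Navi12) and register its ERAM and INTV regions for PSP loading. ASICs
 * that have no DMCU (or use DMCUB instead) simply return 0 here.
 */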
1771 static int load_dmcu_fw(struct amdgpu_device *adev)
1772 {
1773 	const char *fw_name_dmcu = NULL;
1774 	int r;
1775 	const struct dmcu_firmware_header_v1_0 *hdr;
1776 
	switch (adev->asic_type) {
1778 #if defined(CONFIG_DRM_AMD_DC_SI)
1779 	case CHIP_TAHITI:
1780 	case CHIP_PITCAIRN:
1781 	case CHIP_VERDE:
1782 	case CHIP_OLAND:
1783 #endif
1784 	case CHIP_BONAIRE:
1785 	case CHIP_HAWAII:
1786 	case CHIP_KAVERI:
1787 	case CHIP_KABINI:
1788 	case CHIP_MULLINS:
1789 	case CHIP_TONGA:
1790 	case CHIP_FIJI:
1791 	case CHIP_CARRIZO:
1792 	case CHIP_STONEY:
1793 	case CHIP_POLARIS11:
1794 	case CHIP_POLARIS10:
1795 	case CHIP_POLARIS12:
1796 	case CHIP_VEGAM:
1797 	case CHIP_VEGA10:
1798 	case CHIP_VEGA12:
1799 	case CHIP_VEGA20:
1800 		return 0;
1801 	case CHIP_NAVI12:
1802 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1803 		break;
1804 	case CHIP_RAVEN:
1805 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1806 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1807 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1808 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1809 		else
1810 			return 0;
1811 		break;
1812 	default:
1813 		switch (adev->ip_versions[DCE_HWIP][0]) {
1814 		case IP_VERSION(2, 0, 2):
1815 		case IP_VERSION(2, 0, 3):
1816 		case IP_VERSION(2, 0, 0):
1817 		case IP_VERSION(2, 1, 0):
1818 		case IP_VERSION(3, 0, 0):
1819 		case IP_VERSION(3, 0, 2):
1820 		case IP_VERSION(3, 0, 3):
1821 		case IP_VERSION(3, 0, 1):
1822 		case IP_VERSION(3, 1, 2):
1823 		case IP_VERSION(3, 1, 3):
1824 		case IP_VERSION(3, 1, 5):
1825 		case IP_VERSION(3, 1, 6):
1826 			return 0;
1827 		default:
1828 			break;
1829 		}
1830 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1831 		return -EINVAL;
1832 	}
1833 
1834 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1835 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1836 		return 0;
1837 	}
1838 
1839 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1840 	if (r == -ENOENT) {
1841 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1842 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1843 		adev->dm.fw_dmcu = NULL;
1844 		return 0;
1845 	}
1846 	if (r) {
1847 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1848 			fw_name_dmcu);
1849 		return r;
1850 	}
1851 
1852 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1853 	if (r) {
1854 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1855 			fw_name_dmcu);
1856 		release_firmware(adev->dm.fw_dmcu);
1857 		adev->dm.fw_dmcu = NULL;
1858 		return r;
1859 	}
1860 
1861 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1862 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1863 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1864 	adev->firmware.fw_size +=
1865 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1866 
1867 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1868 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1869 	adev->firmware.fw_size +=
1870 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1871 
1872 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1873 
1874 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1875 
1876 	return 0;
1877 }
1878 
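/*
 * Register access callbacks handed to the DMUB service so that it can read
 * and write DC registers through the DM/DC context.
 */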
1879 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1880 {
1881 	struct amdgpu_device *adev = ctx;
1882 
1883 	return dm_read_reg(adev->dm.dc->ctx, address);
1884 }
1885 
1886 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1887 				     uint32_t value)
1888 {
1889 	struct amdgpu_device *adev = ctx;
1890 
1891 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1892 }
1893 
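/*
 * Software init for the DMUB service: select the firmware for the detected
 * DCN IP version, validate it, create the dmub_srv instance and carve out a
 * VRAM buffer for the DMUB regions.
 */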
1894 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1895 {
1896 	struct dmub_srv_create_params create_params;
1897 	struct dmub_srv_region_params region_params;
1898 	struct dmub_srv_region_info region_info;
1899 	struct dmub_srv_fb_params fb_params;
1900 	struct dmub_srv_fb_info *fb_info;
1901 	struct dmub_srv *dmub_srv;
1902 	const struct dmcub_firmware_header_v1_0 *hdr;
1903 	const char *fw_name_dmub;
1904 	enum dmub_asic dmub_asic;
1905 	enum dmub_status status;
1906 	int r;
1907 
1908 	switch (adev->ip_versions[DCE_HWIP][0]) {
1909 	case IP_VERSION(2, 1, 0):
1910 		dmub_asic = DMUB_ASIC_DCN21;
1911 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1912 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1913 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1914 		break;
1915 	case IP_VERSION(3, 0, 0):
1916 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1917 			dmub_asic = DMUB_ASIC_DCN30;
1918 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1919 		} else {
1920 			dmub_asic = DMUB_ASIC_DCN30;
1921 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1922 		}
1923 		break;
1924 	case IP_VERSION(3, 0, 1):
1925 		dmub_asic = DMUB_ASIC_DCN301;
1926 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1927 		break;
1928 	case IP_VERSION(3, 0, 2):
1929 		dmub_asic = DMUB_ASIC_DCN302;
1930 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1931 		break;
1932 	case IP_VERSION(3, 0, 3):
1933 		dmub_asic = DMUB_ASIC_DCN303;
1934 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1935 		break;
1936 	case IP_VERSION(3, 1, 2):
1937 	case IP_VERSION(3, 1, 3):
1938 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1939 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1940 		break;
1941 	case IP_VERSION(3, 1, 5):
1942 		dmub_asic = DMUB_ASIC_DCN315;
1943 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1944 		break;
1945 	case IP_VERSION(3, 1, 6):
1946 		dmub_asic = DMUB_ASIC_DCN316;
1947 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1948 		break;
1949 	default:
1950 		/* ASIC doesn't support DMUB. */
1951 		return 0;
1952 	}
1953 
1954 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1955 	if (r) {
1956 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1957 		return 0;
1958 	}
1959 
1960 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1961 	if (r) {
1962 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1963 		return 0;
1964 	}
1965 
1966 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1967 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1968 
1969 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1970 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1971 			AMDGPU_UCODE_ID_DMCUB;
1972 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1973 			adev->dm.dmub_fw;
1974 		adev->firmware.fw_size +=
1975 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1976 
1977 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1978 			 adev->dm.dmcub_fw_version);
1979 	}
1980 
1981 
1982 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1983 	dmub_srv = adev->dm.dmub_srv;
1984 
1985 	if (!dmub_srv) {
1986 		DRM_ERROR("Failed to allocate DMUB service!\n");
1987 		return -ENOMEM;
1988 	}
1989 
1990 	memset(&create_params, 0, sizeof(create_params));
1991 	create_params.user_ctx = adev;
1992 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1993 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1994 	create_params.asic = dmub_asic;
1995 
1996 	/* Create the DMUB service. */
1997 	status = dmub_srv_create(dmub_srv, &create_params);
1998 	if (status != DMUB_STATUS_OK) {
1999 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2000 		return -EINVAL;
2001 	}
2002 
2003 	/* Calculate the size of all the regions for the DMUB service. */
2004 	memset(&region_params, 0, sizeof(region_params));
2005 
2006 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2007 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2008 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2009 	region_params.vbios_size = adev->bios_size;
2010 	region_params.fw_bss_data = region_params.bss_data_size ?
2011 		adev->dm.dmub_fw->data +
2012 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2013 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2014 	region_params.fw_inst_const =
2015 		adev->dm.dmub_fw->data +
2016 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2017 		PSP_HEADER_BYTES;
2018 
2019 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2020 					   &region_info);
2021 
2022 	if (status != DMUB_STATUS_OK) {
2023 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2024 		return -EINVAL;
2025 	}
2026 
2027 	/*
2028 	 * Allocate a framebuffer based on the total size of all the regions.
2029 	 * TODO: Move this into GART.
2030 	 */
2031 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2032 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2033 				    &adev->dm.dmub_bo_gpu_addr,
2034 				    &adev->dm.dmub_bo_cpu_addr);
2035 	if (r)
2036 		return r;
2037 
2038 	/* Rebase the regions on the framebuffer address. */
2039 	memset(&fb_params, 0, sizeof(fb_params));
2040 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2041 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2042 	fb_params.region_info = &region_info;
2043 
2044 	adev->dm.dmub_fb_info =
2045 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2046 	fb_info = adev->dm.dmub_fb_info;
2047 
2048 	if (!fb_info) {
2049 		DRM_ERROR(
2050 			"Failed to allocate framebuffer info for DMUB service!\n");
2051 		return -ENOMEM;
2052 	}
2053 
2054 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2055 	if (status != DMUB_STATUS_OK) {
2056 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2057 		return -EINVAL;
2058 	}
2059 
2060 	return 0;
2061 }
2062 
2063 static int dm_sw_init(void *handle)
2064 {
2065 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2066 	int r;
2067 
2068 	r = dm_dmub_sw_init(adev);
2069 	if (r)
2070 		return r;
2071 
2072 	return load_dmcu_fw(adev);
2073 }
2074 
2075 static int dm_sw_fini(void *handle)
2076 {
2077 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2078 
2079 	kfree(adev->dm.dmub_fb_info);
2080 	adev->dm.dmub_fb_info = NULL;
2081 
2082 	if (adev->dm.dmub_srv) {
2083 		dmub_srv_destroy(adev->dm.dmub_srv);
2084 		adev->dm.dmub_srv = NULL;
2085 	}
2086 
2087 	release_firmware(adev->dm.dmub_fw);
2088 	adev->dm.dmub_fw = NULL;
2089 
2090 	release_firmware(adev->dm.fw_dmcu);
2091 	adev->dm.fw_dmcu = NULL;
2092 
2093 	return 0;
2094 }
2095 
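/*
 * Walk all connectors and start MST topology management on every link that
 * was detected as an MST branch device. On failure the link is downgraded to
 * a single (non-MST) connection.
 */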
2096 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2097 {
2098 	struct amdgpu_dm_connector *aconnector;
2099 	struct drm_connector *connector;
2100 	struct drm_connector_list_iter iter;
2101 	int ret = 0;
2102 
2103 	drm_connector_list_iter_begin(dev, &iter);
2104 	drm_for_each_connector_iter(connector, &iter) {
2105 		aconnector = to_amdgpu_dm_connector(connector);
2106 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2107 		    aconnector->mst_mgr.aux) {
2108 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2109 					 aconnector,
2110 					 aconnector->base.base.id);
2111 
2112 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2113 			if (ret < 0) {
2114 				DRM_ERROR("DM_MST: Failed to start MST\n");
2115 				aconnector->dc_link->type =
2116 					dc_connection_single;
2117 				break;
2118 			}
2119 		}
2120 	}
2121 	drm_connector_list_iter_end(&iter);
2122 
2123 	return ret;
2124 }
2125 
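/*
 * Late init: program the ABM (backlight reduction) parameters into DMCU IRAM,
 * or into DMCUB for ASICs where ABM is implemented there, and then kick off
 * MST link detection for all connectors.
 */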
2126 static int dm_late_init(void *handle)
2127 {
2128 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2129 
2130 	struct dmcu_iram_parameters params;
2131 	unsigned int linear_lut[16];
2132 	int i;
2133 	struct dmcu *dmcu = NULL;
2134 
2135 	dmcu = adev->dm.dc->res_pool->dmcu;
2136 
2137 	for (i = 0; i < 16; i++)
2138 		linear_lut[i] = 0xFFFF * i / 15;
2139 
2140 	params.set = 0;
2141 	params.backlight_ramping_override = false;
2142 	params.backlight_ramping_start = 0xCCCC;
2143 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2144 	params.backlight_lut_array_size = 16;
2145 	params.backlight_lut_array = linear_lut;
2146 
	/* Min backlight level after ABM reduction; don't allow below 1%.
	 * 0xFFFF x 0.01 = 0x28F
	 */
2150 	params.min_abm_backlight = 0x28F;
	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
2155 	if (dmcu) {
2156 		if (!dmcu_load_iram(dmcu, params))
2157 			return -EINVAL;
2158 	} else if (adev->dm.dc->ctx->dmub_srv) {
2159 		struct dc_link *edp_links[MAX_NUM_EDP];
2160 		int edp_num;
2161 
2162 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2163 		for (i = 0; i < edp_num; i++) {
2164 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2165 				return -EINVAL;
2166 		}
2167 	}
2168 
2169 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2170 }
2171 
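/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, its topology is torn down and a hotplug
 * event is sent so that userspace re-probes the outputs.
 */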
2172 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2173 {
2174 	struct amdgpu_dm_connector *aconnector;
2175 	struct drm_connector *connector;
2176 	struct drm_connector_list_iter iter;
2177 	struct drm_dp_mst_topology_mgr *mgr;
2178 	int ret;
2179 	bool need_hotplug = false;
2180 
2181 	drm_connector_list_iter_begin(dev, &iter);
2182 	drm_for_each_connector_iter(connector, &iter) {
2183 		aconnector = to_amdgpu_dm_connector(connector);
2184 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2185 		    aconnector->mst_port)
2186 			continue;
2187 
2188 		mgr = &aconnector->mst_mgr;
2189 
2190 		if (suspend) {
2191 			drm_dp_mst_topology_mgr_suspend(mgr);
2192 		} else {
2193 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2194 			if (ret < 0) {
2195 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2196 				need_hotplug = true;
2197 			}
2198 		}
2199 	}
2200 	drm_connector_list_iter_end(&iter);
2201 
2202 	if (need_hotplug)
2203 		drm_kms_helper_hotplug_event(dev);
2204 }
2205 
2206 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2207 {
2208 	int ret = 0;
2209 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed; the
	 * settings should be passed to smu during boot up and resume from S3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, and then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
2240 	switch (adev->ip_versions[DCE_HWIP][0]) {
2241 	case IP_VERSION(2, 0, 2):
2242 	case IP_VERSION(2, 0, 0):
2243 		break;
2244 	default:
2245 		return 0;
2246 	}
2247 
2248 	ret = amdgpu_dpm_write_watermarks_table(adev);
2249 	if (ret) {
2250 		DRM_ERROR("Failed to update WMTABLE!\n");
2251 		return ret;
2252 	}
2253 
2254 	return 0;
2255 }
2256 
2257 /**
2258  * dm_hw_init() - Initialize DC device
2259  * @handle: The base driver device containing the amdgpu_dm device.
2260  *
2261  * Initialize the &struct amdgpu_display_manager device. This involves calling
2262  * the initializers of each DM component, then populating the struct with them.
2263  *
2264  * Although the function implies hardware initialization, both hardware and
2265  * software are initialized here. Splitting them out to their relevant init
2266  * hooks is a future TODO item.
2267  *
2268  * Some notable things that are initialized here:
2269  *
2270  * - Display Core, both software and hardware
2271  * - DC modules that we need (freesync and color management)
2272  * - DRM software states
2273  * - Interrupt sources and handlers
2274  * - Vblank support
2275  * - Debug FS entries, if enabled
2276  */
2277 static int dm_hw_init(void *handle)
2278 {
2279 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2280 	/* Create DAL display manager */
2281 	amdgpu_dm_init(adev);
2282 	amdgpu_dm_hpd_init(adev);
2283 
2284 	return 0;
2285 }
2286 
2287 /**
2288  * dm_hw_fini() - Teardown DC device
2289  * @handle: The base driver device containing the amdgpu_dm device.
2290  *
2291  * Teardown components within &struct amdgpu_display_manager that require
2292  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2293  * were loaded. Also flush IRQ workqueues and disable them.
2294  */
2295 static int dm_hw_fini(void *handle)
2296 {
2297 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2298 
2299 	amdgpu_dm_hpd_fini(adev);
2300 
2301 	amdgpu_dm_irq_fini(adev);
2302 	amdgpu_dm_fini(adev);
2303 	return 0;
2304 }
2305 
2306 
2307 static int dm_enable_vblank(struct drm_crtc *crtc);
2308 static void dm_disable_vblank(struct drm_crtc *crtc);
2309 
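/*
 * During GPU reset, enable or disable the pageflip and vblank interrupts of
 * every CRTC that still has an active stream in the given DC state.
 */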
2310 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2311 				 struct dc_state *state, bool enable)
2312 {
2313 	enum dc_irq_source irq_source;
2314 	struct amdgpu_crtc *acrtc;
2315 	int rc = -EBUSY;
2316 	int i = 0;
2317 
2318 	for (i = 0; i < state->stream_count; i++) {
2319 		acrtc = get_crtc_by_otg_inst(
2320 				adev, state->stream_status[i].primary_otg_inst);
2321 
2322 		if (acrtc && state->stream_status[i].plane_count != 0) {
2323 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2324 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2327 			if (rc)
2328 				DRM_WARN("Failed to %s pflip interrupts\n",
2329 					 enable ? "enable" : "disable");
2330 
2331 			if (enable) {
2332 				rc = dm_enable_vblank(&acrtc->base);
2333 				if (rc)
2334 					DRM_WARN("Failed to enable vblank interrupts\n");
2335 			} else {
2336 				dm_disable_vblank(&acrtc->base);
2337 			}
2338 
2339 		}
2340 	}
2341 
2342 }
2343 
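/*
 * Build a copy of the current DC state with all streams and their planes
 * removed and commit it, effectively blanking every display. Used from
 * dm_suspend() while a GPU reset is in progress.
 */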
2344 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2345 {
2346 	struct dc_state *context = NULL;
2347 	enum dc_status res = DC_ERROR_UNEXPECTED;
2348 	int i;
2349 	struct dc_stream_state *del_streams[MAX_PIPES];
2350 	int del_streams_count = 0;
2351 
2352 	memset(del_streams, 0, sizeof(del_streams));
2353 
2354 	context = dc_create_state(dc);
2355 	if (context == NULL)
2356 		goto context_alloc_fail;
2357 
2358 	dc_resource_state_copy_construct_current(dc, context);
2359 
2360 	/* First remove from context all streams */
2361 	for (i = 0; i < context->stream_count; i++) {
2362 		struct dc_stream_state *stream = context->streams[i];
2363 
2364 		del_streams[del_streams_count++] = stream;
2365 	}
2366 
2367 	/* Remove all planes for removed streams and then remove the streams */
2368 	for (i = 0; i < del_streams_count; i++) {
2369 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2370 			res = DC_FAIL_DETACH_SURFACES;
2371 			goto fail;
2372 		}
2373 
2374 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2375 		if (res != DC_OK)
2376 			goto fail;
2377 	}
2378 
2379 	res = dc_commit_state(dc, context);
2380 
2381 fail:
2382 	dc_release_state(context);
2383 
2384 context_alloc_fail:
2385 	return res;
2386 }
2387 
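/* Flush any outstanding HPD RX offload work on every link before suspend. */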
2388 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2389 {
2390 	int i;
2391 
2392 	if (dm->hpd_rx_offload_wq) {
2393 		for (i = 0; i < dm->dc->caps.max_links; i++)
2394 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2395 	}
2396 }
2397 
2398 static int dm_suspend(void *handle)
2399 {
2400 	struct amdgpu_device *adev = handle;
2401 	struct amdgpu_display_manager *dm = &adev->dm;
2402 	int ret = 0;
2403 
2404 	if (amdgpu_in_reset(adev)) {
2405 		mutex_lock(&dm->dc_lock);
2406 
2407 #if defined(CONFIG_DRM_AMD_DC_DCN)
2408 		dc_allow_idle_optimizations(adev->dm.dc, false);
2409 #endif
2410 
2411 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2412 
2413 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2414 
2415 		amdgpu_dm_commit_zero_streams(dm->dc);
2416 
2417 		amdgpu_dm_irq_suspend(adev);
2418 
2419 		hpd_rx_irq_work_suspend(dm);
2420 
2421 		return ret;
2422 	}
2423 
2424 	WARN_ON(adev->dm.cached_state);
2425 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2426 
2427 	s3_handle_mst(adev_to_drm(adev), true);
2428 
2429 	amdgpu_dm_irq_suspend(adev);
2430 
2431 	hpd_rx_irq_work_suspend(dm);
2432 
2433 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2434 
2435 	return 0;
2436 }
2437 
2438 struct amdgpu_dm_connector *
2439 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2440 					     struct drm_crtc *crtc)
2441 {
2442 	uint32_t i;
2443 	struct drm_connector_state *new_con_state;
2444 	struct drm_connector *connector;
2445 	struct drm_crtc *crtc_from_state;
2446 
2447 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2448 		crtc_from_state = new_con_state->crtc;
2449 
2450 		if (crtc_from_state == crtc)
2451 			return to_amdgpu_dm_connector(connector);
2452 	}
2453 
2454 	return NULL;
2455 }
2456 
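/*
 * Emulate a successful link detection for a forced connector: create a fake
 * sink matching the connector signal type and attempt to read a local EDID
 * for it.
 */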
2457 static void emulated_link_detect(struct dc_link *link)
2458 {
2459 	struct dc_sink_init_data sink_init_data = { 0 };
2460 	struct display_sink_capability sink_caps = { 0 };
2461 	enum dc_edid_status edid_status;
2462 	struct dc_context *dc_ctx = link->ctx;
2463 	struct dc_sink *sink = NULL;
2464 	struct dc_sink *prev_sink = NULL;
2465 
2466 	link->type = dc_connection_none;
2467 	prev_sink = link->local_sink;
2468 
2469 	if (prev_sink)
2470 		dc_sink_release(prev_sink);
2471 
2472 	switch (link->connector_signal) {
2473 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2474 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2475 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2476 		break;
2477 	}
2478 
2479 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2480 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2481 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2482 		break;
2483 	}
2484 
2485 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2486 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2487 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2488 		break;
2489 	}
2490 
2491 	case SIGNAL_TYPE_LVDS: {
2492 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2493 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2494 		break;
2495 	}
2496 
2497 	case SIGNAL_TYPE_EDP: {
2498 		sink_caps.transaction_type =
2499 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2500 		sink_caps.signal = SIGNAL_TYPE_EDP;
2501 		break;
2502 	}
2503 
2504 	case SIGNAL_TYPE_DISPLAY_PORT: {
2505 		sink_caps.transaction_type =
2506 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2507 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2508 		break;
2509 	}
2510 
2511 	default:
2512 		DC_ERROR("Invalid connector type! signal:%d\n",
2513 			link->connector_signal);
2514 		return;
2515 	}
2516 
2517 	sink_init_data.link = link;
2518 	sink_init_data.sink_signal = sink_caps.signal;
2519 
2520 	sink = dc_sink_create(&sink_init_data);
2521 	if (!sink) {
2522 		DC_ERROR("Failed to create sink!\n");
2523 		return;
2524 	}
2525 
2526 	/* dc_sink_create returns a new reference */
2527 	link->local_sink = sink;
2528 
2529 	edid_status = dm_helpers_read_local_edid(
2530 			link->ctx,
2531 			link,
2532 			sink);
2533 
2534 	if (edid_status != EDID_OK)
2535 		DC_ERROR("Failed to read EDID");
2536 
2537 }
2538 
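/*
 * Re-commit the cached DC state after a GPU reset, forcing a full update on
 * every plane of every stream so the hardware gets fully reprogrammed.
 */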
2539 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2540 				     struct amdgpu_display_manager *dm)
2541 {
2542 	struct {
2543 		struct dc_surface_update surface_updates[MAX_SURFACES];
2544 		struct dc_plane_info plane_infos[MAX_SURFACES];
2545 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2546 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2547 		struct dc_stream_update stream_update;
	} *bundle;
2549 	int k, m;
2550 
2551 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2552 
2553 	if (!bundle) {
2554 		dm_error("Failed to allocate update bundle\n");
2555 		goto cleanup;
2556 	}
2557 
2558 	for (k = 0; k < dc_state->stream_count; k++) {
2559 		bundle->stream_update.stream = dc_state->streams[k];
2560 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
2571 	}
2572 
2573 cleanup:
2574 	kfree(bundle);
2577 }
2578 
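/*
 * Turn off the stream that is currently driven by the given link by
 * committing a dpms_off stream update.
 */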
2579 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2580 {
2581 	struct dc_stream_state *stream_state;
2582 	struct amdgpu_dm_connector *aconnector = link->priv;
2583 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2584 	struct dc_stream_update stream_update;
2585 	bool dpms_off = true;
2586 
2587 	memset(&stream_update, 0, sizeof(stream_update));
2588 	stream_update.dpms_off = &dpms_off;
2589 
2590 	mutex_lock(&adev->dm.dc_lock);
2591 	stream_state = dc_stream_find_from_link(link);
2592 
2593 	if (stream_state == NULL) {
2594 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2595 		mutex_unlock(&adev->dm.dc_lock);
2596 		return;
2597 	}
2598 
2599 	stream_update.stream = stream_state;
2600 	acrtc_state->force_dpms_off = true;
2601 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2602 				     stream_state, &stream_update,
2603 				     stream_state->ctx->dc->current_state);
2604 	mutex_unlock(&adev->dm.dc_lock);
2605 }
2606 
2607 static int dm_resume(void *handle)
2608 {
2609 	struct amdgpu_device *adev = handle;
2610 	struct drm_device *ddev = adev_to_drm(adev);
2611 	struct amdgpu_display_manager *dm = &adev->dm;
2612 	struct amdgpu_dm_connector *aconnector;
2613 	struct drm_connector *connector;
2614 	struct drm_connector_list_iter iter;
2615 	struct drm_crtc *crtc;
2616 	struct drm_crtc_state *new_crtc_state;
2617 	struct dm_crtc_state *dm_new_crtc_state;
2618 	struct drm_plane *plane;
2619 	struct drm_plane_state *new_plane_state;
2620 	struct dm_plane_state *dm_new_plane_state;
2621 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2622 	enum dc_connection_type new_connection_type = dc_connection_none;
2623 	struct dc_state *dc_state;
2624 	int i, r, j;
2625 
2626 	if (amdgpu_in_reset(adev)) {
2627 		dc_state = dm->cached_dc_state;
2628 
2629 		/*
2630 		 * The dc->current_state is backed up into dm->cached_dc_state
2631 		 * before we commit 0 streams.
2632 		 *
2633 		 * DC will clear link encoder assignments on the real state
2634 		 * but the changes won't propagate over to the copy we made
2635 		 * before the 0 streams commit.
2636 		 *
2637 		 * DC expects that link encoder assignments are *not* valid
2638 		 * when committing a state, so as a workaround we can copy
2639 		 * off of the current state.
2640 		 *
2641 		 * We lose the previous assignments, but we had already
2642 		 * commit 0 streams anyway.
2643 		 */
2644 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2645 
2646 		if (dc_enable_dmub_notifications(adev->dm.dc))
2647 			amdgpu_dm_outbox_init(adev);
2648 
2649 		r = dm_dmub_hw_init(adev);
2650 		if (r)
2651 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2652 
2653 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2654 		dc_resume(dm->dc);
2655 
2656 		amdgpu_dm_irq_resume_early(adev);
2657 
2658 		for (i = 0; i < dc_state->stream_count; i++) {
2659 			dc_state->streams[i]->mode_changed = true;
2660 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2661 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2662 					= 0xffffffff;
2663 			}
2664 		}
2665 
2666 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2667 
2668 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2669 
2670 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2671 
2672 		dc_release_state(dm->cached_dc_state);
2673 		dm->cached_dc_state = NULL;
2674 
2675 		amdgpu_dm_irq_resume_late(adev);
2676 
2677 		mutex_unlock(&dm->dc_lock);
2678 
2679 		return 0;
2680 	}
2681 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2682 	dc_release_state(dm_state->context);
2683 	dm_state->context = dc_create_state(dm->dc);
2684 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2685 	dc_resource_state_construct(dm->dc, dm_state->context);
2686 
2687 	/* Re-enable outbox interrupts for DPIA. */
2688 	if (dc_enable_dmub_notifications(adev->dm.dc))
2689 		amdgpu_dm_outbox_init(adev);
2690 
2691 	/* Before powering on DC we need to re-initialize DMUB. */
2692 	dm_dmub_hw_resume(adev);
2693 
2694 	/* power on hardware */
2695 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2696 
2697 	/* program HPD filter */
2698 	dc_resume(dm->dc);
2699 
2700 	/*
2701 	 * early enable HPD Rx IRQ, should be done before set mode as short
2702 	 * pulse interrupts are used for MST
2703 	 */
2704 	amdgpu_dm_irq_resume_early(adev);
2705 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2707 	s3_handle_mst(ddev, false);
2708 
	/* Do detection */
2710 	drm_connector_list_iter_begin(ddev, &iter);
2711 	drm_for_each_connector_iter(connector, &iter) {
2712 		aconnector = to_amdgpu_dm_connector(connector);
2713 
2714 		/*
2715 		 * this is the case when traversing through already created
2716 		 * MST connectors, should be skipped
2717 		 */
2718 		if (aconnector->dc_link &&
2719 		    aconnector->dc_link->type == dc_connection_mst_branch)
2720 			continue;
2721 
2722 		mutex_lock(&aconnector->hpd_lock);
2723 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2724 			DRM_ERROR("KMS: Failed to detect connector\n");
2725 
2726 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2727 			emulated_link_detect(aconnector->dc_link);
2728 		else
2729 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2730 
2731 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2732 			aconnector->fake_enable = false;
2733 
2734 		if (aconnector->dc_sink)
2735 			dc_sink_release(aconnector->dc_sink);
2736 		aconnector->dc_sink = NULL;
2737 		amdgpu_dm_update_connector_after_detect(aconnector);
2738 		mutex_unlock(&aconnector->hpd_lock);
2739 	}
2740 	drm_connector_list_iter_end(&iter);
2741 
2742 	/* Force mode set in atomic commit */
2743 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2744 		new_crtc_state->active_changed = true;
2745 
2746 	/*
2747 	 * atomic_check is expected to create the dc states. We need to release
2748 	 * them here, since they were duplicated as part of the suspend
2749 	 * procedure.
2750 	 */
2751 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2752 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2753 		if (dm_new_crtc_state->stream) {
2754 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2755 			dc_stream_release(dm_new_crtc_state->stream);
2756 			dm_new_crtc_state->stream = NULL;
2757 		}
2758 	}
2759 
2760 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2761 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2762 		if (dm_new_plane_state->dc_state) {
2763 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2764 			dc_plane_state_release(dm_new_plane_state->dc_state);
2765 			dm_new_plane_state->dc_state = NULL;
2766 		}
2767 	}
2768 
2769 	drm_atomic_helper_resume(ddev, dm->cached_state);
2770 
2771 	dm->cached_state = NULL;
2772 
2773 	amdgpu_dm_irq_resume_late(adev);
2774 
2775 	amdgpu_dm_smu_write_watermarks_table(adev);
2776 
2777 	return 0;
2778 }
2779 
2780 /**
2781  * DOC: DM Lifecycle
2782  *
2783  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2784  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2785  * the base driver's device list to be initialized and torn down accordingly.
2786  *
2787  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2788  */
2789 
2790 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2791 	.name = "dm",
2792 	.early_init = dm_early_init,
2793 	.late_init = dm_late_init,
2794 	.sw_init = dm_sw_init,
2795 	.sw_fini = dm_sw_fini,
2796 	.early_fini = amdgpu_dm_early_fini,
2797 	.hw_init = dm_hw_init,
2798 	.hw_fini = dm_hw_fini,
2799 	.suspend = dm_suspend,
2800 	.resume = dm_resume,
2801 	.is_idle = dm_is_idle,
2802 	.wait_for_idle = dm_wait_for_idle,
2803 	.check_soft_reset = dm_check_soft_reset,
2804 	.soft_reset = dm_soft_reset,
2805 	.set_clockgating_state = dm_set_clockgating_state,
2806 	.set_powergating_state = dm_set_powergating_state,
2807 };
2808 
2809 const struct amdgpu_ip_block_version dm_ip_block =
2810 {
2811 	.type = AMD_IP_BLOCK_TYPE_DCE,
2812 	.major = 1,
2813 	.minor = 0,
2814 	.rev = 0,
2815 	.funcs = &amdgpu_dm_funcs,
2816 };
2817 
2818 
2819 /**
2820  * DOC: atomic
2821  *
2822  * *WIP*
2823  */
2824 
2825 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2826 	.fb_create = amdgpu_display_user_framebuffer_create,
2827 	.get_format_info = amd_get_format_info,
2828 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2829 	.atomic_check = amdgpu_dm_atomic_check,
2830 	.atomic_commit = drm_atomic_helper_commit,
2831 };
2832 
2833 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2834 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2835 };
2836 
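/*
 * Refresh the eDP backlight capabilities of a connector: cache the DPCD
 * extended caps, decide whether AUX backlight control should be used, and
 * convert the sink's max/min content light levels into input signal limits.
 */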
2837 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2838 {
2839 	u32 max_cll, min_cll, max, min, q, r;
2840 	struct amdgpu_dm_backlight_caps *caps;
2841 	struct amdgpu_display_manager *dm;
2842 	struct drm_connector *conn_base;
2843 	struct amdgpu_device *adev;
2844 	struct dc_link *link = NULL;
2845 	static const u8 pre_computed_values[] = {
2846 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2847 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2848 	int i;
2849 
2850 	if (!aconnector || !aconnector->dc_link)
2851 		return;
2852 
2853 	link = aconnector->dc_link;
2854 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2855 		return;
2856 
2857 	conn_base = &aconnector->base;
2858 	adev = drm_to_adev(conn_base->dev);
2859 	dm = &adev->dm;
2860 	for (i = 0; i < dm->num_of_edps; i++) {
2861 		if (link == dm->backlight_link[i])
2862 			break;
2863 	}
2864 	if (i >= dm->num_of_edps)
2865 		return;
2866 	caps = &dm->backlight_caps[i];
2867 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2868 	caps->aux_support = false;
2869 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2870 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2871 
2872 	if (caps->ext_caps->bits.oled == 1 /*||
2873 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2874 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2875 		caps->aux_support = true;
2876 
2877 	if (amdgpu_backlight == 0)
2878 		caps->aux_support = false;
2879 	else if (amdgpu_backlight == 1)
2880 		caps->aux_support = true;
2881 
2882 	/* From the specification (CTA-861-G), for calculating the maximum
2883 	 * luminance we need to use:
2884 	 *	Luminance = 50*2**(CV/32)
2885 	 * Where CV is a one-byte value.
	 * For calculating this expression we may need floating-point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From Euclid's division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values,
	 * we just used the following Ruby line:
2893 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2894 	 * The results of the above expressions can be verified at
2895 	 * pre_computed_values.
2896 	 */
2897 	q = max_cll >> 5;
2898 	r = max_cll % 32;
2899 	max = (1 << q) * pre_computed_values[r];
2900 
	/* min luminance: maxLum * (CV/255)^2 / 100 */
2902 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2903 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2904 
2905 	caps->aux_max_input_signal = max;
2906 	caps->aux_min_input_signal = min;
2907 }
2908 
2909 void amdgpu_dm_update_connector_after_detect(
2910 		struct amdgpu_dm_connector *aconnector)
2911 {
2912 	struct drm_connector *connector = &aconnector->base;
2913 	struct drm_device *dev = connector->dev;
2914 	struct dc_sink *sink;
2915 
2916 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2918 		return;
2919 
2920 	sink = aconnector->dc_link->local_sink;
2921 	if (sink)
2922 		dc_sink_retain(sink);
2923 
2924 	/*
	 * EDID mgmt connector gets its first update only in the mode_valid hook, and then
	 * the connector sink is set to either a fake or a physical sink depending on the link status.
2927 	 * Skip if already done during boot.
2928 	 */
2929 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2930 			&& aconnector->dc_em_sink) {
2931 
2932 		/*
		 * For S3 resume with headless, use em_sink to fake the stream
2934 		 * because on resume connector->sink is set to NULL
2935 		 */
2936 		mutex_lock(&dev->mode_config.mutex);
2937 
2938 		if (sink) {
2939 			if (aconnector->dc_sink) {
2940 				amdgpu_dm_update_freesync_caps(connector, NULL);
2941 				/*
				 * The retain and release below are used to
				 * bump up the refcount for the sink because the link no longer
				 * points to it after disconnect, so on the next crtc-to-connector
				 * reshuffle by UMD we would otherwise get an unwanted dc_sink release.
2946 				 */
2947 				dc_sink_release(aconnector->dc_sink);
2948 			}
2949 			aconnector->dc_sink = sink;
2950 			dc_sink_retain(aconnector->dc_sink);
2951 			amdgpu_dm_update_freesync_caps(connector,
2952 					aconnector->edid);
2953 		} else {
2954 			amdgpu_dm_update_freesync_caps(connector, NULL);
2955 			if (!aconnector->dc_sink) {
2956 				aconnector->dc_sink = aconnector->dc_em_sink;
2957 				dc_sink_retain(aconnector->dc_sink);
2958 			}
2959 		}
2960 
2961 		mutex_unlock(&dev->mode_config.mutex);
2962 
2963 		if (sink)
2964 			dc_sink_release(sink);
2965 		return;
2966 	}
2967 
2968 	/*
2969 	 * TODO: temporary guard to look for proper fix
2970 	 * if this sink is MST sink, we should not do anything
2971 	 */
2972 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2973 		dc_sink_release(sink);
2974 		return;
2975 	}
2976 
2977 	if (aconnector->dc_sink == sink) {
2978 		/*
2979 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2980 		 * Do nothing!!
2981 		 */
2982 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2983 				aconnector->connector_id);
2984 		if (sink)
2985 			dc_sink_release(sink);
2986 		return;
2987 	}
2988 
2989 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2990 		aconnector->connector_id, aconnector->dc_sink, sink);
2991 
2992 	mutex_lock(&dev->mode_config.mutex);
2993 
2994 	/*
2995 	 * 1. Update status of the drm connector
2996 	 * 2. Send an event and let userspace tell us what to do
2997 	 */
2998 	if (sink) {
2999 		/*
3000 		 * TODO: check if we still need the S3 mode update workaround.
3001 		 * If yes, put it here.
3002 		 */
3003 		if (aconnector->dc_sink) {
3004 			amdgpu_dm_update_freesync_caps(connector, NULL);
3005 			dc_sink_release(aconnector->dc_sink);
3006 		}
3007 
3008 		aconnector->dc_sink = sink;
3009 		dc_sink_retain(aconnector->dc_sink);
3010 		if (sink->dc_edid.length == 0) {
3011 			aconnector->edid = NULL;
3012 			if (aconnector->dc_link->aux_mode) {
3013 				drm_dp_cec_unset_edid(
3014 					&aconnector->dm_dp_aux.aux);
3015 			}
3016 		} else {
3017 			aconnector->edid =
3018 				(struct edid *)sink->dc_edid.raw_edid;
3019 
3020 			if (aconnector->dc_link->aux_mode)
3021 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3022 						    aconnector->edid);
3023 		}
3024 
3025 		drm_connector_update_edid_property(connector, aconnector->edid);
3026 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3027 		update_connector_ext_caps(aconnector);
3028 	} else {
3029 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3030 		amdgpu_dm_update_freesync_caps(connector, NULL);
3031 		drm_connector_update_edid_property(connector, NULL);
3032 		aconnector->num_modes = 0;
3033 		dc_sink_release(aconnector->dc_sink);
3034 		aconnector->dc_sink = NULL;
3035 		aconnector->edid = NULL;
3036 #ifdef CONFIG_DRM_AMD_DC_HDCP
3037 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3038 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3039 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3040 #endif
3041 	}
3042 
3043 	mutex_unlock(&dev->mode_config.mutex);
3044 
3045 	update_subconnector_property(aconnector);
3046 
3047 	if (sink)
3048 		dc_sink_release(sink);
3049 }
3050 
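/*
 * Common HPD (hot plug) handling: re-run link detection, update the
 * connector state and, unless the connector is forced, send a hotplug event
 * to userspace.
 */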
3051 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3052 {
3053 	struct drm_connector *connector = &aconnector->base;
3054 	struct drm_device *dev = connector->dev;
3055 	enum dc_connection_type new_connection_type = dc_connection_none;
3056 	struct amdgpu_device *adev = drm_to_adev(dev);
3057 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3058 	struct dm_crtc_state *dm_crtc_state = NULL;
3059 
3060 	if (adev->dm.disable_hpd_irq)
3061 		return;
3062 
3063 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3064 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3065 					dm_con_state->base.state,
3066 					dm_con_state->base.crtc));
3067 	/*
	 * In case of failure or MST, there is no need to update the connector status
	 * or notify the OS, since (in the MST case) MST does this in its own context.
3070 	 */
3071 	mutex_lock(&aconnector->hpd_lock);
3072 
3073 #ifdef CONFIG_DRM_AMD_DC_HDCP
3074 	if (adev->dm.hdcp_workqueue) {
3075 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3076 		dm_con_state->update_hdcp = true;
3077 	}
3078 #endif
3079 	if (aconnector->fake_enable)
3080 		aconnector->fake_enable = false;
3081 
3082 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3083 		DRM_ERROR("KMS: Failed to detect connector\n");
3084 
3085 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3086 		emulated_link_detect(aconnector->dc_link);
3087 
3088 		drm_modeset_lock_all(dev);
3089 		dm_restore_drm_connector_state(dev, connector);
3090 		drm_modeset_unlock_all(dev);
3091 
3092 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3093 			drm_kms_helper_connector_hotplug_event(connector);
3094 
3095 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3096 		if (new_connection_type == dc_connection_none &&
3097 		    aconnector->dc_link->type == dc_connection_none &&
3098 		    dm_crtc_state)
3099 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3100 
3101 		amdgpu_dm_update_connector_after_detect(aconnector);
3102 
3103 		drm_modeset_lock_all(dev);
3104 		dm_restore_drm_connector_state(dev, connector);
3105 		drm_modeset_unlock_all(dev);
3106 
3107 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3108 			drm_kms_helper_connector_hotplug_event(connector);
3109 	}
3110 	mutex_unlock(&aconnector->hpd_lock);
3111 
3112 }
3113 
3114 static void handle_hpd_irq(void *param)
3115 {
3116 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3117 
3118 	handle_hpd_irq_helper(aconnector);
3119 
3120 }
3121 
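/*
 * Drain pending MST sideband messages: read the ESI/sink-count DPCD range,
 * let the MST manager handle the IRQ, ACK it back to the sink, and repeat
 * until no new IRQ is reported (bounded by max_process_count).
 */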
3122 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3123 {
3124 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3125 	uint8_t dret;
3126 	bool new_irq_handled = false;
3127 	int dpcd_addr;
3128 	int dpcd_bytes_to_read;
3129 
3130 	const int max_process_count = 30;
3131 	int process_count = 0;
3132 
3133 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3134 
3135 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3136 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3137 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3138 		dpcd_addr = DP_SINK_COUNT;
3139 	} else {
3140 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3141 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3142 		dpcd_addr = DP_SINK_COUNT_ESI;
3143 	}
3144 
3145 	dret = drm_dp_dpcd_read(
3146 		&aconnector->dm_dp_aux.aux,
3147 		dpcd_addr,
3148 		esi,
3149 		dpcd_bytes_to_read);
3150 
3151 	while (dret == dpcd_bytes_to_read &&
3152 		process_count < max_process_count) {
3153 		uint8_t retry;
3154 		dret = 0;
3155 
3156 		process_count++;
3157 
3158 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3159 		/* handle HPD short pulse irq */
3160 		if (aconnector->mst_mgr.mst_state)
3161 			drm_dp_mst_hpd_irq(
3162 				&aconnector->mst_mgr,
3163 				esi,
3164 				&new_irq_handled);
3165 
3166 		if (new_irq_handled) {
3167 			/* ACK at DPCD to notify down stream */
3168 			const int ack_dpcd_bytes_to_write =
3169 				dpcd_bytes_to_read - 1;
3170 
3171 			for (retry = 0; retry < 3; retry++) {
3172 				uint8_t wret;
3173 
3174 				wret = drm_dp_dpcd_write(
3175 					&aconnector->dm_dp_aux.aux,
3176 					dpcd_addr + 1,
3177 					&esi[1],
3178 					ack_dpcd_bytes_to_write);
3179 				if (wret == ack_dpcd_bytes_to_write)
3180 					break;
3181 			}
3182 
3183 			/* check if there is new irq to be handled */
3184 			dret = drm_dp_dpcd_read(
3185 				&aconnector->dm_dp_aux.aux,
3186 				dpcd_addr,
3187 				esi,
3188 				dpcd_bytes_to_read);
3189 
3190 			new_irq_handled = false;
3191 		} else {
3192 			break;
3193 		}
3194 	}
3195 
3196 	if (process_count == max_process_count)
3197 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3198 }
3199 
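/*
 * Queue a copy of the HPD RX IRQ data onto the per-link offload workqueue so
 * that the heavier handling (automated tests, link-loss recovery) runs from a
 * worker thread.
 */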
3200 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3201 							union hpd_irq_data hpd_irq_data)
3202 {
3203 	struct hpd_rx_irq_offload_work *offload_work =
3204 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3205 
3206 	if (!offload_work) {
3207 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3208 		return;
3209 	}
3210 
3211 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3212 	offload_work->data = hpd_irq_data;
3213 	offload_work->offload_wq = offload_wq;
3214 
3215 	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queued work to handle hpd_rx offload work\n");
3217 }
3218 
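/*
 * HPD RX (short pulse) interrupt handling: service MST sideband messages,
 * offload automated-test and link-loss handling to the per-link workqueue,
 * and re-detect the downstream port if its status changed.
 */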
3219 static void handle_hpd_rx_irq(void *param)
3220 {
3221 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3222 	struct drm_connector *connector = &aconnector->base;
3223 	struct drm_device *dev = connector->dev;
3224 	struct dc_link *dc_link = aconnector->dc_link;
3225 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3226 	bool result = false;
3227 	enum dc_connection_type new_connection_type = dc_connection_none;
3228 	struct amdgpu_device *adev = drm_to_adev(dev);
3229 	union hpd_irq_data hpd_irq_data;
3230 	bool link_loss = false;
3231 	bool has_left_work = false;
3232 	int idx = aconnector->base.index;
3233 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3234 
3235 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3236 
3237 	if (adev->dm.disable_hpd_irq)
3238 		return;
3239 
3240 	/*
	 * TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
	 * conflicts; once an i2c helper is implemented, this mutex should be
	 * retired.
3244 	 */
3245 	mutex_lock(&aconnector->hpd_lock);
3246 
3247 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3248 						&link_loss, true, &has_left_work);
3249 
3250 	if (!has_left_work)
3251 		goto out;
3252 
3253 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3254 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3255 		goto out;
3256 	}
3257 
3258 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3259 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3260 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3261 			dm_handle_mst_sideband_msg(aconnector);
3262 			goto out;
3263 		}
3264 
3265 		if (link_loss) {
3266 			bool skip = false;
3267 
3268 			spin_lock(&offload_wq->offload_lock);
3269 			skip = offload_wq->is_handling_link_loss;
3270 
3271 			if (!skip)
3272 				offload_wq->is_handling_link_loss = true;
3273 
3274 			spin_unlock(&offload_wq->offload_lock);
3275 
3276 			if (!skip)
3277 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3278 
3279 			goto out;
3280 		}
3281 	}
3282 
3283 out:
3284 	if (result && !is_mst_root_connector) {
3285 		/* Downstream Port status changed. */
3286 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3287 			DRM_ERROR("KMS: Failed to detect connector\n");
3288 
3289 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3290 			emulated_link_detect(dc_link);
3291 
3292 			if (aconnector->fake_enable)
3293 				aconnector->fake_enable = false;
3294 
3295 			amdgpu_dm_update_connector_after_detect(aconnector);
3296 
3297 
3298 			drm_modeset_lock_all(dev);
3299 			dm_restore_drm_connector_state(dev, connector);
3300 			drm_modeset_unlock_all(dev);
3301 
3302 			drm_kms_helper_connector_hotplug_event(connector);
3303 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3304 
3305 			if (aconnector->fake_enable)
3306 				aconnector->fake_enable = false;
3307 
3308 			amdgpu_dm_update_connector_after_detect(aconnector);
3309 
3310 
3311 			drm_modeset_lock_all(dev);
3312 			dm_restore_drm_connector_state(dev, connector);
3313 			drm_modeset_unlock_all(dev);
3314 
3315 			drm_kms_helper_connector_hotplug_event(connector);
3316 		}
3317 	}
3318 #ifdef CONFIG_DRM_AMD_DC_HDCP
3319 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3320 		if (adev->dm.hdcp_workqueue)
3321 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3322 	}
3323 #endif
3324 
3325 	if (dc_link->type != dc_connection_mst_branch)
3326 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3327 
3328 	mutex_unlock(&aconnector->hpd_lock);
3329 }
3330 
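/*
 * Register HPD and HPD RX interrupt handlers for every connector whose link
 * exposes valid HPD interrupt sources.
 */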
3331 static void register_hpd_handlers(struct amdgpu_device *adev)
3332 {
3333 	struct drm_device *dev = adev_to_drm(adev);
3334 	struct drm_connector *connector;
3335 	struct amdgpu_dm_connector *aconnector;
3336 	const struct dc_link *dc_link;
3337 	struct dc_interrupt_params int_params = {0};
3338 
3339 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3340 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3341 
3342 	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
3344 
3345 		aconnector = to_amdgpu_dm_connector(connector);
3346 		dc_link = aconnector->dc_link;
3347 
3348 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3349 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3350 			int_params.irq_source = dc_link->irq_source_hpd;
3351 
3352 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3353 					handle_hpd_irq,
3354 					(void *) aconnector);
3355 		}
3356 
3357 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3358 
3359 			/* Also register for DP short pulse (hpd_rx). */
3360 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
3362 
3363 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3364 					handle_hpd_rx_irq,
3365 					(void *) aconnector);
3366 
3367 			if (adev->dm.hpd_rx_offload_wq)
3368 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3369 					aconnector;
3370 		}
3371 	}
3372 }
3373 
3374 #if defined(CONFIG_DRM_AMD_DC_SI)
3375 /* Register IRQ sources and initialize IRQ callbacks */
3376 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3377 {
3378 	struct dc *dc = adev->dm.dc;
3379 	struct common_irq_params *c_irq_params;
3380 	struct dc_interrupt_params int_params = {0};
3381 	int r;
3382 	int i;
3383 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3384 
3385 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3386 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3387 
3388 	/*
3389 	 * Actions of amdgpu_irq_add_id():
3390 	 * 1. Register a set() function with base driver.
3391 	 *    Base driver will call set() function to enable/disable an
3392 	 *    interrupt in DC hardware.
3393 	 * 2. Register amdgpu_dm_irq_handler().
3394 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3395 	 *    coming from DC hardware.
3396 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3397 	 *    for acknowledging and handling. */
3398 
3399 	/* Use VBLANK interrupt */
3400 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3401 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3402 		if (r) {
3403 			DRM_ERROR("Failed to add crtc irq id!\n");
3404 			return r;
3405 		}
3406 
3407 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3408 		int_params.irq_source =
3409 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3410 
3411 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3412 
3413 		c_irq_params->adev = adev;
3414 		c_irq_params->irq_src = int_params.irq_source;
3415 
3416 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3417 				dm_crtc_high_irq, c_irq_params);
3418 	}
3419 
3420 	/* Use GRPH_PFLIP interrupt */
3421 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3422 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3423 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3424 		if (r) {
3425 			DRM_ERROR("Failed to add page flip irq id!\n");
3426 			return r;
3427 		}
3428 
3429 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3430 		int_params.irq_source =
3431 			dc_interrupt_to_irq_source(dc, i, 0);
3432 
3433 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3434 
3435 		c_irq_params->adev = adev;
3436 		c_irq_params->irq_src = int_params.irq_source;
3437 
3438 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3439 				dm_pflip_high_irq, c_irq_params);
3440 
3441 	}
3442 
3443 	/* HPD */
3444 	r = amdgpu_irq_add_id(adev, client_id,
3445 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3446 	if (r) {
3447 		DRM_ERROR("Failed to add hpd irq id!\n");
3448 		return r;
3449 	}
3450 
3451 	register_hpd_handlers(adev);
3452 
3453 	return 0;
3454 }
3455 #endif
3456 
3457 /* Register IRQ sources and initialize IRQ callbacks */
3458 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3459 {
3460 	struct dc *dc = adev->dm.dc;
3461 	struct common_irq_params *c_irq_params;
3462 	struct dc_interrupt_params int_params = {0};
3463 	int r;
3464 	int i;
3465 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3466 
3467 	if (adev->family >= AMDGPU_FAMILY_AI)
3468 		client_id = SOC15_IH_CLIENTID_DCE;
3469 
3470 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3471 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3472 
3473 	/*
3474 	 * Actions of amdgpu_irq_add_id():
3475 	 * 1. Register a set() function with base driver.
3476 	 *    Base driver will call set() function to enable/disable an
3477 	 *    interrupt in DC hardware.
3478 	 * 2. Register amdgpu_dm_irq_handler().
3479 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3480 	 *    coming from DC hardware.
3481 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3482 	 *    for acknowledging and handling. */
3483 
3484 	/* Use VBLANK interrupt */
3485 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3486 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3487 		if (r) {
3488 			DRM_ERROR("Failed to add crtc irq id!\n");
3489 			return r;
3490 		}
3491 
3492 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3493 		int_params.irq_source =
3494 			dc_interrupt_to_irq_source(dc, i, 0);
3495 
3496 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3497 
3498 		c_irq_params->adev = adev;
3499 		c_irq_params->irq_src = int_params.irq_source;
3500 
3501 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3502 				dm_crtc_high_irq, c_irq_params);
3503 	}
3504 
3505 	/* Use VUPDATE interrupt */
3506 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3507 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3508 		if (r) {
3509 			DRM_ERROR("Failed to add vupdate irq id!\n");
3510 			return r;
3511 		}
3512 
3513 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3514 		int_params.irq_source =
3515 			dc_interrupt_to_irq_source(dc, i, 0);
3516 
3517 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3518 
3519 		c_irq_params->adev = adev;
3520 		c_irq_params->irq_src = int_params.irq_source;
3521 
3522 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3523 				dm_vupdate_high_irq, c_irq_params);
3524 	}
3525 
3526 	/* Use GRPH_PFLIP interrupt */
3527 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3528 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3529 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3530 		if (r) {
3531 			DRM_ERROR("Failed to add page flip irq id!\n");
3532 			return r;
3533 		}
3534 
3535 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3536 		int_params.irq_source =
3537 			dc_interrupt_to_irq_source(dc, i, 0);
3538 
3539 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3540 
3541 		c_irq_params->adev = adev;
3542 		c_irq_params->irq_src = int_params.irq_source;
3543 
3544 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3545 				dm_pflip_high_irq, c_irq_params);
3546 
3547 	}
3548 
3549 	/* HPD */
3550 	r = amdgpu_irq_add_id(adev, client_id,
3551 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3552 	if (r) {
3553 		DRM_ERROR("Failed to add hpd irq id!\n");
3554 		return r;
3555 	}
3556 
3557 	register_hpd_handlers(adev);
3558 
3559 	return 0;
3560 }
3561 
3562 #if defined(CONFIG_DRM_AMD_DC_DCN)
3563 /* Register IRQ sources and initialize IRQ callbacks */
3564 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3565 {
3566 	struct dc *dc = adev->dm.dc;
3567 	struct common_irq_params *c_irq_params;
3568 	struct dc_interrupt_params int_params = {0};
3569 	int r;
3570 	int i;
3571 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3572 	static const unsigned int vrtl_int_srcid[] = {
3573 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3574 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3575 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3576 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3577 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3578 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3579 	};
3580 #endif
3581 
3582 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3583 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3584 
3585 	/*
3586 	 * Actions of amdgpu_irq_add_id():
3587 	 * 1. Register a set() function with base driver.
3588 	 *    Base driver will call set() function to enable/disable an
3589 	 *    interrupt in DC hardware.
3590 	 * 2. Register amdgpu_dm_irq_handler().
3591 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3592 	 *    coming from DC hardware.
3593 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3594 	 *    for acknowledging and handling.
3595 	 */
3596 
3597 	/* Use VSTARTUP interrupt */
3598 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3599 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3600 			i++) {
3601 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3602 
3603 		if (r) {
3604 			DRM_ERROR("Failed to add crtc irq id!\n");
3605 			return r;
3606 		}
3607 
3608 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3609 		int_params.irq_source =
3610 			dc_interrupt_to_irq_source(dc, i, 0);
3611 
3612 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3613 
3614 		c_irq_params->adev = adev;
3615 		c_irq_params->irq_src = int_params.irq_source;
3616 
3617 		amdgpu_dm_irq_register_interrupt(
3618 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3619 	}
3620 
3621 	/* Use otg vertical line interrupt */
3622 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3623 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3624 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3625 				vrtl_int_srcid[i], &adev->vline0_irq);
3626 
3627 		if (r) {
3628 			DRM_ERROR("Failed to add vline0 irq id!\n");
3629 			return r;
3630 		}
3631 
3632 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3633 		int_params.irq_source =
3634 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3635 
3636 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3637 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3638 			break;
3639 		}
3640 
3641 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3642 					- DC_IRQ_SOURCE_DC1_VLINE0];
3643 
3644 		c_irq_params->adev = adev;
3645 		c_irq_params->irq_src = int_params.irq_source;
3646 
3647 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3648 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3649 	}
3650 #endif
3651 
3652 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3653 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3654 	 * to trigger at end of each vblank, regardless of state of the lock,
3655 	 * matching DCE behaviour.
3656 	 */
3657 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3658 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3659 	     i++) {
3660 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3661 
3662 		if (r) {
3663 			DRM_ERROR("Failed to add vupdate irq id!\n");
3664 			return r;
3665 		}
3666 
3667 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3668 		int_params.irq_source =
3669 			dc_interrupt_to_irq_source(dc, i, 0);
3670 
3671 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3672 
3673 		c_irq_params->adev = adev;
3674 		c_irq_params->irq_src = int_params.irq_source;
3675 
3676 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3677 				dm_vupdate_high_irq, c_irq_params);
3678 	}
3679 
3680 	/* Use GRPH_PFLIP interrupt */
3681 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3682 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3683 			i++) {
3684 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3685 		if (r) {
3686 			DRM_ERROR("Failed to add page flip irq id!\n");
3687 			return r;
3688 		}
3689 
3690 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3691 		int_params.irq_source =
3692 			dc_interrupt_to_irq_source(dc, i, 0);
3693 
3694 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3695 
3696 		c_irq_params->adev = adev;
3697 		c_irq_params->irq_src = int_params.irq_source;
3698 
3699 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3700 				dm_pflip_high_irq, c_irq_params);
3701 
3702 	}
3703 
3704 	/* HPD */
3705 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3706 			&adev->hpd_irq);
3707 	if (r) {
3708 		DRM_ERROR("Failed to add hpd irq id!\n");
3709 		return r;
3710 	}
3711 
3712 	register_hpd_handlers(adev);
3713 
3714 	return 0;
3715 }
3716 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3717 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3718 {
3719 	struct dc *dc = adev->dm.dc;
3720 	struct common_irq_params *c_irq_params;
3721 	struct dc_interrupt_params int_params = {0};
3722 	int r, i;
3723 
3724 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3725 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3726 
3727 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3728 			&adev->dmub_outbox_irq);
3729 	if (r) {
3730 		DRM_ERROR("Failed to add outbox irq id!\n");
3731 		return r;
3732 	}
3733 
3734 	if (dc->ctx->dmub_srv) {
3735 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3736 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3737 		int_params.irq_source =
3738 		dc_interrupt_to_irq_source(dc, i, 0);
3739 
3740 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3741 
3742 		c_irq_params->adev = adev;
3743 		c_irq_params->irq_src = int_params.irq_source;
3744 
3745 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3746 				dm_dmub_outbox1_low_irq, c_irq_params);
3747 	}
3748 
3749 	return 0;
3750 }
3751 #endif
3752 
3753 /*
3754  * Acquires the lock for the atomic state object and returns
3755  * the new atomic state.
3756  *
3757  * This should only be called during atomic check.
3758  */
3759 int dm_atomic_get_state(struct drm_atomic_state *state,
3760 			struct dm_atomic_state **dm_state)
3761 {
3762 	struct drm_device *dev = state->dev;
3763 	struct amdgpu_device *adev = drm_to_adev(dev);
3764 	struct amdgpu_display_manager *dm = &adev->dm;
3765 	struct drm_private_state *priv_state;
3766 
3767 	if (*dm_state)
3768 		return 0;
3769 
3770 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3771 	if (IS_ERR(priv_state))
3772 		return PTR_ERR(priv_state);
3773 
3774 	*dm_state = to_dm_atomic_state(priv_state);
3775 
3776 	return 0;
3777 }
3778 
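/*
 * Return the new DM private state tracked in @state, or NULL if the DM
 * private object is not part of this atomic update.
 */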
3779 static struct dm_atomic_state *
3780 dm_atomic_get_new_state(struct drm_atomic_state *state)
3781 {
3782 	struct drm_device *dev = state->dev;
3783 	struct amdgpu_device *adev = drm_to_adev(dev);
3784 	struct amdgpu_display_manager *dm = &adev->dm;
3785 	struct drm_private_obj *obj;
3786 	struct drm_private_state *new_obj_state;
3787 	int i;
3788 
3789 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3790 		if (obj->funcs == dm->atomic_obj.funcs)
3791 			return to_dm_atomic_state(new_obj_state);
3792 	}
3793 
3794 	return NULL;
3795 }
3796 
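/*
 * Duplicate the DM private state, taking a copy of the current DC state so
 * atomic check can build on top of it without touching the committed state.
 */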
3797 static struct drm_private_state *
3798 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3799 {
3800 	struct dm_atomic_state *old_state, *new_state;
3801 
3802 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3803 	if (!new_state)
3804 		return NULL;
3805 
3806 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3807 
3808 	old_state = to_dm_atomic_state(obj->state);
3809 
3810 	if (old_state && old_state->context)
3811 		new_state->context = dc_copy_state(old_state->context);
3812 
3813 	if (!new_state->context) {
3814 		kfree(new_state);
3815 		return NULL;
3816 	}
3817 
3818 	return &new_state->base;
3819 }
3820 
3821 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3822 				    struct drm_private_state *state)
3823 {
3824 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3825 
3826 	if (dm_state && dm_state->context)
3827 		dc_release_state(dm_state->context);
3828 
3829 	kfree(dm_state);
3830 }
3831 
3832 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3833 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3834 	.atomic_destroy_state = dm_atomic_destroy_state,
3835 };
3836 
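/*
 * Initialize the DRM mode configuration (limits, helpers) and create the DM
 * private atomic object that carries the DC state across atomic commits.
 */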
3837 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3838 {
3839 	struct dm_atomic_state *state;
3840 	int r;
3841 
3842 	adev->mode_info.mode_config_initialized = true;
3843 
3844 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3845 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3846 
3847 	adev_to_drm(adev)->mode_config.max_width = 16384;
3848 	adev_to_drm(adev)->mode_config.max_height = 16384;
3849 
3850 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3851 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3852 	/* indicates support for immediate flip */
3853 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3854 
3855 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3856 
3857 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3858 	if (!state)
3859 		return -ENOMEM;
3860 
3861 	state->context = dc_create_state(adev->dm.dc);
3862 	if (!state->context) {
3863 		kfree(state);
3864 		return -ENOMEM;
3865 	}
3866 
3867 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3868 
3869 	drm_atomic_private_obj_init(adev_to_drm(adev),
3870 				    &adev->dm.atomic_obj,
3871 				    &state->base,
3872 				    &dm_atomic_state_funcs);
3873 
3874 	r = amdgpu_display_modeset_create_props(adev);
3875 	if (r) {
3876 		dc_release_state(state->context);
3877 		kfree(state);
3878 		return r;
3879 	}
3880 
3881 	r = amdgpu_dm_audio_init(adev);
3882 	if (r) {
3883 		dc_release_state(state->context);
3884 		kfree(state);
3885 		return r;
3886 	}
3887 
3888 	return 0;
3889 }
3890 
3891 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3892 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3893 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3894 
3895 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3896 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3897 
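/*
 * Fill in the backlight capabilities for the given eDP index, preferring the
 * ranges reported by ACPI and falling back to the driver defaults above.
 */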
3898 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3899 					    int bl_idx)
3900 {
3901 #if defined(CONFIG_ACPI)
3902 	struct amdgpu_dm_backlight_caps caps;
3903 
3904 	memset(&caps, 0, sizeof(caps));
3905 
3906 	if (dm->backlight_caps[bl_idx].caps_valid)
3907 		return;
3908 
3909 	amdgpu_acpi_get_backlight_caps(&caps);
3910 	if (caps.caps_valid) {
3911 		dm->backlight_caps[bl_idx].caps_valid = true;
3912 		if (caps.aux_support)
3913 			return;
3914 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3915 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3916 	} else {
3917 		dm->backlight_caps[bl_idx].min_input_signal =
3918 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3919 		dm->backlight_caps[bl_idx].max_input_signal =
3920 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3921 	}
3922 #else
3923 	if (dm->backlight_caps[bl_idx].aux_support)
3924 		return;
3925 
3926 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3927 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3928 #endif
3929 }
3930 
3931 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3932 				unsigned *min, unsigned *max)
3933 {
3934 	if (!caps)
3935 		return 0;
3936 
3937 	if (caps->aux_support) {
3938 		// Firmware limits are in nits, DC API wants millinits.
3939 		*max = 1000 * caps->aux_max_input_signal;
3940 		*min = 1000 * caps->aux_min_input_signal;
3941 	} else {
3942 		// Firmware limits are 8-bit, PWM control is 16-bit.
3943 		*max = 0x101 * caps->max_input_signal;
3944 		*min = 0x101 * caps->min_input_signal;
3945 	}
3946 	return 1;
3947 }
3948 
3949 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3950 					uint32_t brightness)
3951 {
3952 	unsigned min, max;
3953 
3954 	if (!get_brightness_range(caps, &min, &max))
3955 		return brightness;
3956 
3957 	// Rescale 0..255 to min..max
3958 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3959 				       AMDGPU_MAX_BL_LEVEL);
3960 }
3961 
3962 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3963 				      uint32_t brightness)
3964 {
3965 	unsigned min, max;
3966 
3967 	if (!get_brightness_range(caps, &min, &max))
3968 		return brightness;
3969 
3970 	if (brightness < min)
3971 		return 0;
3972 	// Rescale min..max to 0..255
3973 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3974 				 max - min);
3975 }
3976 
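/*
 * Apply a user-requested brightness to the given backlight link, either via
 * DPCD AUX (in nits) or via the PWM-based path, and record the level that
 * was actually accepted by DC.
 */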
3977 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3978 					 int bl_idx,
3979 					 u32 user_brightness)
3980 {
3981 	struct amdgpu_dm_backlight_caps caps;
3982 	struct dc_link *link;
3983 	u32 brightness;
3984 	bool rc;
3985 
3986 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3987 	caps = dm->backlight_caps[bl_idx];
3988 
3989 	dm->brightness[bl_idx] = user_brightness;
3990 	/* update scratch register */
3991 	if (bl_idx == 0)
3992 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3993 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3994 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3995 
3996 	/* Change brightness based on AUX property */
3997 	if (caps.aux_support) {
3998 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3999 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4000 		if (!rc)
4001 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4002 	} else {
4003 		rc = dc_link_set_backlight_level(link, brightness, 0);
4004 		if (!rc)
4005 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4006 	}
4007 
4008 	if (rc)
4009 		dm->actual_brightness[bl_idx] = user_brightness;
4010 }
4011 
4012 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4013 {
4014 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4015 	int i;
4016 
4017 	for (i = 0; i < dm->num_of_edps; i++) {
4018 		if (bd == dm->backlight_dev[i])
4019 			break;
4020 	}
4021 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4022 		i = 0;
4023 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4024 
4025 	return 0;
4026 }
4027 
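/*
 * Read back the current backlight level from DC (average nits over AUX, or
 * the PWM level) and convert it to the 0..AMDGPU_MAX_BL_LEVEL user range.
 */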
4028 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4029 					 int bl_idx)
4030 {
4031 	struct amdgpu_dm_backlight_caps caps;
4032 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4033 
4034 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4035 	caps = dm->backlight_caps[bl_idx];
4036 
4037 	if (caps.aux_support) {
4038 		u32 avg, peak;
4039 		bool rc;
4040 
4041 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4042 		if (!rc)
4043 			return dm->brightness[bl_idx];
4044 		return convert_brightness_to_user(&caps, avg);
4045 	} else {
4046 		int ret = dc_link_get_backlight_level(link);
4047 
4048 		if (ret == DC_ERROR_UNEXPECTED)
4049 			return dm->brightness[bl_idx];
4050 		return convert_brightness_to_user(&caps, ret);
4051 	}
4052 }
4053 
4054 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4055 {
4056 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4057 	int i;
4058 
4059 	for (i = 0; i < dm->num_of_edps; i++) {
4060 		if (bd == dm->backlight_dev[i])
4061 			break;
4062 	}
4063 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4064 		i = 0;
4065 	return amdgpu_dm_backlight_get_level(dm, i);
4066 }
4067 
4068 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4069 	.options = BL_CORE_SUSPENDRESUME,
4070 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4071 	.update_status	= amdgpu_dm_backlight_update_status,
4072 };
4073 
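/*
 * Register a backlight class device for the next eDP panel, starting at the
 * maximum brightness level.
 */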
4074 static void
4075 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4076 {
4077 	char bl_name[16];
4078 	struct backlight_properties props = { 0 };
4079 
4080 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4081 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4082 
4083 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4084 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4085 	props.type = BACKLIGHT_RAW;
4086 
4087 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4088 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4089 
4090 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4091 								       adev_to_drm(dm->adev)->dev,
4092 								       dm,
4093 								       &amdgpu_dm_backlight_ops,
4094 								       &props);
4095 
4096 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4097 		DRM_ERROR("DM: Backlight registration failed!\n");
4098 	else
4099 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4100 }
4101 #endif
4102 
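/*
 * Allocate and initialize a single DRM plane of the requested type; primary
 * planes are additionally recorded in mode_info for later CRTC init.
 */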
4103 static int initialize_plane(struct amdgpu_display_manager *dm,
4104 			    struct amdgpu_mode_info *mode_info, int plane_id,
4105 			    enum drm_plane_type plane_type,
4106 			    const struct dc_plane_cap *plane_cap)
4107 {
4108 	struct drm_plane *plane;
4109 	unsigned long possible_crtcs;
4110 	int ret = 0;
4111 
4112 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4113 	if (!plane) {
4114 		DRM_ERROR("KMS: Failed to allocate plane\n");
4115 		return -ENOMEM;
4116 	}
4117 	plane->type = plane_type;
4118 
4119 	/*
4120 	 * HACK: IGT tests expect that the primary plane for a CRTC
4121 	 * can only have one possible CRTC. Only expose support for
4122 	 * any CRTC if they're not going to be used as a primary plane
4123 	 * for a CRTC - like overlay or underlay planes.
4124 	 */
4125 	possible_crtcs = 1 << plane_id;
4126 	if (plane_id >= dm->dc->caps.max_streams)
4127 		possible_crtcs = 0xff;
4128 
4129 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4130 
4131 	if (ret) {
4132 		DRM_ERROR("KMS: Failed to initialize plane\n");
4133 		kfree(plane);
4134 		return ret;
4135 	}
4136 
4137 	if (mode_info)
4138 		mode_info->planes[plane_id] = plane;
4139 
4140 	return ret;
4141 }
4142 
4143 
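/*
 * If the link drives an eDP/LVDS panel, make sure a backlight device exists
 * and associate the link with it so brightness requests can be routed.
 */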
4144 static void register_backlight_device(struct amdgpu_display_manager *dm,
4145 				      struct dc_link *link)
4146 {
4147 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4148 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4149 
4150 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4151 	    link->type != dc_connection_none) {
4152 		/*
4153 		 * Even if registration failed, we should continue with
4154 		 * DM initialization because not having a backlight control
4155 		 * is better than a black screen.
4156 		 */
4157 		if (!dm->backlight_dev[dm->num_of_edps])
4158 			amdgpu_dm_register_backlight_device(dm);
4159 
4160 		if (dm->backlight_dev[dm->num_of_edps]) {
4161 			dm->backlight_link[dm->num_of_edps] = link;
4162 			dm->num_of_edps++;
4163 		}
4164 	}
4165 #endif
4166 }
4167 
4168 
4169 /*
4170  * In this architecture, the association
4171  * connector -> encoder -> crtc
4172  * is not really required. The crtc and connector will hold the
4173  * display_index as an abstraction to use with the DAL component.
4174  *
4175  * Returns 0 on success
4176  */
4177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4178 {
4179 	struct amdgpu_display_manager *dm = &adev->dm;
4180 	int32_t i;
4181 	struct amdgpu_dm_connector *aconnector = NULL;
4182 	struct amdgpu_encoder *aencoder = NULL;
4183 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4184 	uint32_t link_cnt;
4185 	int32_t primary_planes;
4186 	enum dc_connection_type new_connection_type = dc_connection_none;
4187 	const struct dc_plane_cap *plane;
4188 	bool psr_feature_enabled = false;
4189 
4190 	dm->display_indexes_num = dm->dc->caps.max_streams;
4191 	/* Update the actual number of CRTCs in use */
4192 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4193 
4194 	link_cnt = dm->dc->caps.max_links;
4195 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4196 		DRM_ERROR("DM: Failed to initialize mode config\n");
4197 		return -EINVAL;
4198 	}
4199 
4200 	/* There is one primary plane per CRTC */
4201 	primary_planes = dm->dc->caps.max_streams;
4202 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4203 
4204 	/*
4205 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4206 	 * Order is reversed to match iteration order in atomic check.
4207 	 */
4208 	for (i = (primary_planes - 1); i >= 0; i--) {
4209 		plane = &dm->dc->caps.planes[i];
4210 
4211 		if (initialize_plane(dm, mode_info, i,
4212 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4213 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4214 			goto fail;
4215 		}
4216 	}
4217 
4218 	/*
4219 	 * Initialize overlay planes, index starting after primary planes.
4220 	 * These planes have a higher DRM index than the primary planes since
4221 	 * they should be considered as having a higher z-order.
4222 	 * Order is reversed to match iteration order in atomic check.
4223 	 *
4224 	 * Only support DCN for now, and only expose one so we don't encourage
4225 	 * userspace to use up all the pipes.
4226 	 */
4227 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4228 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4229 
4230 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4231 			continue;
4232 
4233 		if (!plane->blends_with_above || !plane->blends_with_below)
4234 			continue;
4235 
4236 		if (!plane->pixel_format_support.argb8888)
4237 			continue;
4238 
4239 		if (initialize_plane(dm, NULL, primary_planes + i,
4240 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4241 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4242 			goto fail;
4243 		}
4244 
4245 		/* Only create one overlay plane. */
4246 		break;
4247 	}
4248 
4249 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4250 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4251 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4252 			goto fail;
4253 		}
4254 
4255 #if defined(CONFIG_DRM_AMD_DC_DCN)
4256 	/* Use Outbox interrupt */
4257 	switch (adev->ip_versions[DCE_HWIP][0]) {
4258 	case IP_VERSION(3, 0, 0):
4259 	case IP_VERSION(3, 1, 2):
4260 	case IP_VERSION(3, 1, 3):
4261 	case IP_VERSION(3, 1, 5):
4262 	case IP_VERSION(3, 1, 6):
4263 	case IP_VERSION(2, 1, 0):
4264 		if (register_outbox_irq_handlers(dm->adev)) {
4265 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4266 			goto fail;
4267 		}
4268 		break;
4269 	default:
4270 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4271 			      adev->ip_versions[DCE_HWIP][0]);
4272 	}
4273 
4274 	/* Determine whether to enable PSR support by default. */
4275 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4276 		switch (adev->ip_versions[DCE_HWIP][0]) {
4277 		case IP_VERSION(3, 1, 2):
4278 		case IP_VERSION(3, 1, 3):
4279 		case IP_VERSION(3, 1, 5):
4280 		case IP_VERSION(3, 1, 6):
4281 			psr_feature_enabled = true;
4282 			break;
4283 		default:
4284 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4285 			break;
4286 		}
4287 	}
4288 #endif
4289 
4290 	/* Disable vblank IRQs aggressively for power-saving. */
4291 	adev_to_drm(adev)->vblank_disable_immediate = true;
4292 
4293 	/* loops over all connectors on the board */
4294 	for (i = 0; i < link_cnt; i++) {
4295 		struct dc_link *link = NULL;
4296 
4297 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4298 			DRM_ERROR(
4299 				"KMS: Cannot support more than %d display indexes\n",
4300 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4301 			continue;
4302 		}
4303 
4304 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4305 		if (!aconnector)
4306 			goto fail;
4307 
4308 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4309 		if (!aencoder)
4310 			goto fail;
4311 
4312 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4313 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4314 			goto fail;
4315 		}
4316 
4317 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4318 			DRM_ERROR("KMS: Failed to initialize connector\n");
4319 			goto fail;
4320 		}
4321 
4322 		link = dc_get_link_at_index(dm->dc, i);
4323 
4324 		if (!dc_link_detect_sink(link, &new_connection_type))
4325 			DRM_ERROR("KMS: Failed to detect connector\n");
4326 
4327 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4328 			emulated_link_detect(link);
4329 			amdgpu_dm_update_connector_after_detect(aconnector);
4330 
4331 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4332 			amdgpu_dm_update_connector_after_detect(aconnector);
4333 			register_backlight_device(dm, link);
4334 			if (dm->num_of_edps)
4335 				update_connector_ext_caps(aconnector);
4336 			if (psr_feature_enabled)
4337 				amdgpu_dm_set_psr_caps(link);
4338 
4339 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4340 			 * PSR is also supported.
4341 			 */
4342 			if (link->psr_settings.psr_feature_enabled)
4343 				adev_to_drm(adev)->vblank_disable_immediate = false;
4344 		}
4345 
4346 
4347 	}
4348 
4349 	/* Software is initialized. Now we can register interrupt handlers. */
4350 	switch (adev->asic_type) {
4351 #if defined(CONFIG_DRM_AMD_DC_SI)
4352 	case CHIP_TAHITI:
4353 	case CHIP_PITCAIRN:
4354 	case CHIP_VERDE:
4355 	case CHIP_OLAND:
4356 		if (dce60_register_irq_handlers(dm->adev)) {
4357 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4358 			goto fail;
4359 		}
4360 		break;
4361 #endif
4362 	case CHIP_BONAIRE:
4363 	case CHIP_HAWAII:
4364 	case CHIP_KAVERI:
4365 	case CHIP_KABINI:
4366 	case CHIP_MULLINS:
4367 	case CHIP_TONGA:
4368 	case CHIP_FIJI:
4369 	case CHIP_CARRIZO:
4370 	case CHIP_STONEY:
4371 	case CHIP_POLARIS11:
4372 	case CHIP_POLARIS10:
4373 	case CHIP_POLARIS12:
4374 	case CHIP_VEGAM:
4375 	case CHIP_VEGA10:
4376 	case CHIP_VEGA12:
4377 	case CHIP_VEGA20:
4378 		if (dce110_register_irq_handlers(dm->adev)) {
4379 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4380 			goto fail;
4381 		}
4382 		break;
4383 	default:
4384 #if defined(CONFIG_DRM_AMD_DC_DCN)
4385 		switch (adev->ip_versions[DCE_HWIP][0]) {
4386 		case IP_VERSION(1, 0, 0):
4387 		case IP_VERSION(1, 0, 1):
4388 		case IP_VERSION(2, 0, 2):
4389 		case IP_VERSION(2, 0, 3):
4390 		case IP_VERSION(2, 0, 0):
4391 		case IP_VERSION(2, 1, 0):
4392 		case IP_VERSION(3, 0, 0):
4393 		case IP_VERSION(3, 0, 2):
4394 		case IP_VERSION(3, 0, 3):
4395 		case IP_VERSION(3, 0, 1):
4396 		case IP_VERSION(3, 1, 2):
4397 		case IP_VERSION(3, 1, 3):
4398 		case IP_VERSION(3, 1, 5):
4399 		case IP_VERSION(3, 1, 6):
4400 			if (dcn10_register_irq_handlers(dm->adev)) {
4401 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4402 				goto fail;
4403 			}
4404 			break;
4405 		default:
4406 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4407 					adev->ip_versions[DCE_HWIP][0]);
4408 			goto fail;
4409 		}
4410 #endif
4411 		break;
4412 	}
4413 
4414 	return 0;
4415 fail:
4416 	kfree(aencoder);
4417 	kfree(aconnector);
4418 
4419 	return -EINVAL;
4420 }
4421 
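/* Undo amdgpu_dm_initialize_drm_device(): release the DM private atomic object. */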
4422 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4423 {
4424 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4426 }
4427 
4428 /******************************************************************************
4429  * amdgpu_display_funcs functions
4430  *****************************************************************************/
4431 
4432 /*
4433  * dm_bandwidth_update - program display watermarks
4434  *
4435  * @adev: amdgpu_device pointer
4436  *
4437  * Calculate and program the display watermarks and line buffer allocation.
4438  */
4439 static void dm_bandwidth_update(struct amdgpu_device *adev)
4440 {
4441 	/* TODO: implement later */
4442 }
4443 
4444 static const struct amdgpu_display_funcs dm_display_funcs = {
4445 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4446 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4447 	.backlight_set_level = NULL, /* never called for DC */
4448 	.backlight_get_level = NULL, /* never called for DC */
4449 	.hpd_sense = NULL, /* called unconditionally */
4450 	.hpd_set_polarity = NULL, /* called unconditionally */
4451 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4452 	.page_flip_get_scanoutpos =
4453 		dm_crtc_get_scanoutpos,/* called unconditionally */
4454 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4455 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4456 };
4457 
4458 #if defined(CONFIG_DEBUG_KERNEL_DC)
4459 
4460 static ssize_t s3_debug_store(struct device *device,
4461 			      struct device_attribute *attr,
4462 			      const char *buf,
4463 			      size_t count)
4464 {
4465 	int ret;
4466 	int s3_state;
4467 	struct drm_device *drm_dev = dev_get_drvdata(device);
4468 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4469 
4470 	ret = kstrtoint(buf, 0, &s3_state);
4471 
4472 	if (ret == 0) {
4473 		if (s3_state) {
4474 			dm_resume(adev);
4475 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4476 		} else
4477 			dm_suspend(adev);
4478 	}
4479 
4480 	return ret == 0 ? count : 0;
4481 }
4482 
4483 DEVICE_ATTR_WO(s3_debug);
4484 
4485 #endif
4486 
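/*
 * Early IP-block init: set the per-ASIC CRTC/HPD/DIG counts and hook up the
 * DM display and IRQ function tables.
 */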
4487 static int dm_early_init(void *handle)
4488 {
4489 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4490 
4491 	switch (adev->asic_type) {
4492 #if defined(CONFIG_DRM_AMD_DC_SI)
4493 	case CHIP_TAHITI:
4494 	case CHIP_PITCAIRN:
4495 	case CHIP_VERDE:
4496 		adev->mode_info.num_crtc = 6;
4497 		adev->mode_info.num_hpd = 6;
4498 		adev->mode_info.num_dig = 6;
4499 		break;
4500 	case CHIP_OLAND:
4501 		adev->mode_info.num_crtc = 2;
4502 		adev->mode_info.num_hpd = 2;
4503 		adev->mode_info.num_dig = 2;
4504 		break;
4505 #endif
4506 	case CHIP_BONAIRE:
4507 	case CHIP_HAWAII:
4508 		adev->mode_info.num_crtc = 6;
4509 		adev->mode_info.num_hpd = 6;
4510 		adev->mode_info.num_dig = 6;
4511 		break;
4512 	case CHIP_KAVERI:
4513 		adev->mode_info.num_crtc = 4;
4514 		adev->mode_info.num_hpd = 6;
4515 		adev->mode_info.num_dig = 7;
4516 		break;
4517 	case CHIP_KABINI:
4518 	case CHIP_MULLINS:
4519 		adev->mode_info.num_crtc = 2;
4520 		adev->mode_info.num_hpd = 6;
4521 		adev->mode_info.num_dig = 6;
4522 		break;
4523 	case CHIP_FIJI:
4524 	case CHIP_TONGA:
4525 		adev->mode_info.num_crtc = 6;
4526 		adev->mode_info.num_hpd = 6;
4527 		adev->mode_info.num_dig = 7;
4528 		break;
4529 	case CHIP_CARRIZO:
4530 		adev->mode_info.num_crtc = 3;
4531 		adev->mode_info.num_hpd = 6;
4532 		adev->mode_info.num_dig = 9;
4533 		break;
4534 	case CHIP_STONEY:
4535 		adev->mode_info.num_crtc = 2;
4536 		adev->mode_info.num_hpd = 6;
4537 		adev->mode_info.num_dig = 9;
4538 		break;
4539 	case CHIP_POLARIS11:
4540 	case CHIP_POLARIS12:
4541 		adev->mode_info.num_crtc = 5;
4542 		adev->mode_info.num_hpd = 5;
4543 		adev->mode_info.num_dig = 5;
4544 		break;
4545 	case CHIP_POLARIS10:
4546 	case CHIP_VEGAM:
4547 		adev->mode_info.num_crtc = 6;
4548 		adev->mode_info.num_hpd = 6;
4549 		adev->mode_info.num_dig = 6;
4550 		break;
4551 	case CHIP_VEGA10:
4552 	case CHIP_VEGA12:
4553 	case CHIP_VEGA20:
4554 		adev->mode_info.num_crtc = 6;
4555 		adev->mode_info.num_hpd = 6;
4556 		adev->mode_info.num_dig = 6;
4557 		break;
4558 	default:
4559 #if defined(CONFIG_DRM_AMD_DC_DCN)
4560 		switch (adev->ip_versions[DCE_HWIP][0]) {
4561 		case IP_VERSION(2, 0, 2):
4562 		case IP_VERSION(3, 0, 0):
4563 			adev->mode_info.num_crtc = 6;
4564 			adev->mode_info.num_hpd = 6;
4565 			adev->mode_info.num_dig = 6;
4566 			break;
4567 		case IP_VERSION(2, 0, 0):
4568 		case IP_VERSION(3, 0, 2):
4569 			adev->mode_info.num_crtc = 5;
4570 			adev->mode_info.num_hpd = 5;
4571 			adev->mode_info.num_dig = 5;
4572 			break;
4573 		case IP_VERSION(2, 0, 3):
4574 		case IP_VERSION(3, 0, 3):
4575 			adev->mode_info.num_crtc = 2;
4576 			adev->mode_info.num_hpd = 2;
4577 			adev->mode_info.num_dig = 2;
4578 			break;
4579 		case IP_VERSION(1, 0, 0):
4580 		case IP_VERSION(1, 0, 1):
4581 		case IP_VERSION(3, 0, 1):
4582 		case IP_VERSION(2, 1, 0):
4583 		case IP_VERSION(3, 1, 2):
4584 		case IP_VERSION(3, 1, 3):
4585 		case IP_VERSION(3, 1, 5):
4586 		case IP_VERSION(3, 1, 6):
4587 			adev->mode_info.num_crtc = 4;
4588 			adev->mode_info.num_hpd = 4;
4589 			adev->mode_info.num_dig = 4;
4590 			break;
4591 		default:
4592 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4593 					adev->ip_versions[DCE_HWIP][0]);
4594 			return -EINVAL;
4595 		}
4596 #endif
4597 		break;
4598 	}
4599 
4600 	amdgpu_dm_set_irq_funcs(adev);
4601 
4602 	if (adev->mode_info.funcs == NULL)
4603 		adev->mode_info.funcs = &dm_display_funcs;
4604 
4605 	/*
4606 	 * Note: Do NOT change adev->audio_endpt_rreg and
4607 	 * adev->audio_endpt_wreg because they are initialised in
4608 	 * amdgpu_device_init()
4609 	 */
4610 #if defined(CONFIG_DEBUG_KERNEL_DC)
4611 	device_create_file(
4612 		adev_to_drm(adev)->dev,
4613 		&dev_attr_s3_debug);
4614 #endif
4615 
4616 	return 0;
4617 }
4618 
4619 static bool modeset_required(struct drm_crtc_state *crtc_state,
4620 			     struct dc_stream_state *new_stream,
4621 			     struct dc_stream_state *old_stream)
4622 {
4623 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4624 }
4625 
4626 static bool modereset_required(struct drm_crtc_state *crtc_state)
4627 {
4628 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4629 }
4630 
4631 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4632 {
4633 	drm_encoder_cleanup(encoder);
4634 	kfree(encoder);
4635 }
4636 
4637 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4638 	.destroy = amdgpu_dm_encoder_destroy,
4639 };
4640 
4641 
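/*
 * Look up the DC per-format scaling limits (in 0.001 units) for the given
 * framebuffer; a factor of 1 reported by DC means "no scaling" and is
 * normalized to 1000.
 */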
4642 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4643 					 struct drm_framebuffer *fb,
4644 					 int *min_downscale, int *max_upscale)
4645 {
4646 	struct amdgpu_device *adev = drm_to_adev(dev);
4647 	struct dc *dc = adev->dm.dc;
4648 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4649 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4650 
4651 	switch (fb->format->format) {
4652 	case DRM_FORMAT_P010:
4653 	case DRM_FORMAT_NV12:
4654 	case DRM_FORMAT_NV21:
4655 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4656 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4657 		break;
4658 
4659 	case DRM_FORMAT_XRGB16161616F:
4660 	case DRM_FORMAT_ARGB16161616F:
4661 	case DRM_FORMAT_XBGR16161616F:
4662 	case DRM_FORMAT_ABGR16161616F:
4663 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4664 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4665 		break;
4666 
4667 	default:
4668 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4669 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4670 		break;
4671 	}
4672 
4673 	/*
4674 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4675 	 * scaling factor of 1.0 == 1000 units.
4676 	 */
4677 	if (*max_upscale == 1)
4678 		*max_upscale = 1000;
4679 
4680 	if (*min_downscale == 1)
4681 		*min_downscale = 1000;
4682 }
4683 
4684 
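/*
 * Translate the DRM plane src/dst rectangles into DC scaling info and
 * validate the resulting scaling factors against the per-format plane caps.
 */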
4685 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4686 				const struct drm_plane_state *state,
4687 				struct dc_scaling_info *scaling_info)
4688 {
4689 	int scale_w, scale_h, min_downscale, max_upscale;
4690 
4691 	memset(scaling_info, 0, sizeof(*scaling_info));
4692 
4693 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4694 	scaling_info->src_rect.x = state->src_x >> 16;
4695 	scaling_info->src_rect.y = state->src_y >> 16;
4696 
4697 	/*
4698 	 * For reasons we don't (yet) fully understand a non-zero
4699 	 * src_y coordinate into an NV12 buffer can cause a
4700 	 * system hang on DCN1x.
4701 	 * To avoid hangs (and maybe be overly cautious)
4702 	 * let's reject both non-zero src_x and src_y.
4703 	 *
4704 	 * We currently know of only one use-case to reproduce a
4705 	 * scenario with non-zero src_x and src_y for NV12, which
4706 	 * is to gesture the YouTube Android app into full screen
4707 	 * on ChromeOS.
4708 	 */
4709 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4710 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4711 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4712 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4713 		return -EINVAL;
4714 
4715 	scaling_info->src_rect.width = state->src_w >> 16;
4716 	if (scaling_info->src_rect.width == 0)
4717 		return -EINVAL;
4718 
4719 	scaling_info->src_rect.height = state->src_h >> 16;
4720 	if (scaling_info->src_rect.height == 0)
4721 		return -EINVAL;
4722 
4723 	scaling_info->dst_rect.x = state->crtc_x;
4724 	scaling_info->dst_rect.y = state->crtc_y;
4725 
4726 	if (state->crtc_w == 0)
4727 		return -EINVAL;
4728 
4729 	scaling_info->dst_rect.width = state->crtc_w;
4730 
4731 	if (state->crtc_h == 0)
4732 		return -EINVAL;
4733 
4734 	scaling_info->dst_rect.height = state->crtc_h;
4735 
4736 	/* DRM doesn't specify clipping on destination output. */
4737 	scaling_info->clip_rect = scaling_info->dst_rect;
4738 
4739 	/* Validate scaling per-format with DC plane caps */
4740 	if (state->plane && state->plane->dev && state->fb) {
4741 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4742 					     &min_downscale, &max_upscale);
4743 	} else {
4744 		min_downscale = 250;
4745 		max_upscale = 16000;
4746 	}
4747 
4748 	scale_w = scaling_info->dst_rect.width * 1000 /
4749 		  scaling_info->src_rect.width;
4750 
4751 	if (scale_w < min_downscale || scale_w > max_upscale)
4752 		return -EINVAL;
4753 
4754 	scale_h = scaling_info->dst_rect.height * 1000 /
4755 		  scaling_info->src_rect.height;
4756 
4757 	if (scale_h < min_downscale || scale_h > max_upscale)
4758 		return -EINVAL;
4759 
4760 	/*
4761 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4762 	 * assume reasonable defaults based on the format.
4763 	 */
4764 
4765 	return 0;
4766 }
4767 
4768 static void
4769 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4770 				 uint64_t tiling_flags)
4771 {
4772 	/* Fill GFX8 params */
4773 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4774 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4775 
4776 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4777 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4778 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4779 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4780 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4781 
4782 		/* XXX fix me for VI */
4783 		tiling_info->gfx8.num_banks = num_banks;
4784 		tiling_info->gfx8.array_mode =
4785 				DC_ARRAY_2D_TILED_THIN1;
4786 		tiling_info->gfx8.tile_split = tile_split;
4787 		tiling_info->gfx8.bank_width = bankw;
4788 		tiling_info->gfx8.bank_height = bankh;
4789 		tiling_info->gfx8.tile_aspect = mtaspect;
4790 		tiling_info->gfx8.tile_mode =
4791 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4792 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4793 			== DC_ARRAY_1D_TILED_THIN1) {
4794 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4795 	}
4796 
4797 	tiling_info->gfx8.pipe_config =
4798 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4799 }
4800 
4801 static void
4802 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4803 				  union dc_tiling_info *tiling_info)
4804 {
4805 	tiling_info->gfx9.num_pipes =
4806 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4807 	tiling_info->gfx9.num_banks =
4808 		adev->gfx.config.gb_addr_config_fields.num_banks;
4809 	tiling_info->gfx9.pipe_interleave =
4810 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4811 	tiling_info->gfx9.num_shader_engines =
4812 		adev->gfx.config.gb_addr_config_fields.num_se;
4813 	tiling_info->gfx9.max_compressed_frags =
4814 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4815 	tiling_info->gfx9.num_rb_per_se =
4816 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4817 	tiling_info->gfx9.shaderEnable = 1;
4818 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4819 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4820 }
4821 
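/*
 * Ask DC whether the requested DCC parameters are supported for this format,
 * swizzle mode and surface size; returns -EINVAL when they are not.
 */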
4822 static int
4823 validate_dcc(struct amdgpu_device *adev,
4824 	     const enum surface_pixel_format format,
4825 	     const enum dc_rotation_angle rotation,
4826 	     const union dc_tiling_info *tiling_info,
4827 	     const struct dc_plane_dcc_param *dcc,
4828 	     const struct dc_plane_address *address,
4829 	     const struct plane_size *plane_size)
4830 {
4831 	struct dc *dc = adev->dm.dc;
4832 	struct dc_dcc_surface_param input;
4833 	struct dc_surface_dcc_cap output;
4834 
4835 	memset(&input, 0, sizeof(input));
4836 	memset(&output, 0, sizeof(output));
4837 
4838 	if (!dcc->enable)
4839 		return 0;
4840 
4841 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4842 	    !dc->cap_funcs.get_dcc_compression_cap)
4843 		return -EINVAL;
4844 
4845 	input.format = format;
4846 	input.surface_size.width = plane_size->surface_size.width;
4847 	input.surface_size.height = plane_size->surface_size.height;
4848 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4849 
4850 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4851 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4852 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4853 		input.scan = SCAN_DIRECTION_VERTICAL;
4854 
4855 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4856 		return -EINVAL;
4857 
4858 	if (!output.capable)
4859 		return -EINVAL;
4860 
4861 	if (dcc->independent_64b_blks == 0 &&
4862 	    output.grph.rgb.independent_64b_blks != 0)
4863 		return -EINVAL;
4864 
4865 	return 0;
4866 }
4867 
4868 static bool
4869 modifier_has_dcc(uint64_t modifier)
4870 {
4871 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4872 }
4873 
4874 static unsigned
4875 modifier_gfx9_swizzle_mode(uint64_t modifier)
4876 {
4877 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4878 		return 0;
4879 
4880 	return AMD_FMT_MOD_GET(TILE, modifier);
4881 }
4882 
4883 static const struct drm_format_info *
4884 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4885 {
4886 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4887 }
4888 
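/*
 * Derive GFX9+ tiling parameters from an AMD format modifier, starting from
 * the device defaults and overriding the pipe/bank/packer counts encoded in
 * the modifier.
 */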
4889 static void
4890 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4891 				    union dc_tiling_info *tiling_info,
4892 				    uint64_t modifier)
4893 {
4894 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4895 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4896 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4897 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4898 
4899 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4900 
4901 	if (!IS_AMD_FMT_MOD(modifier))
4902 		return;
4903 
4904 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4905 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4906 
4907 	if (adev->family >= AMDGPU_FAMILY_NV) {
4908 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4909 	} else {
4910 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4911 
4912 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4913 	}
4914 }
4915 
4916 enum dm_micro_swizzle {
4917 	MICRO_SWIZZLE_Z = 0,
4918 	MICRO_SWIZZLE_S = 1,
4919 	MICRO_SWIZZLE_D = 2,
4920 	MICRO_SWIZZLE_R = 3
4921 };
4922 
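/*
 * format_mod_supported() plane hook: LINEAR and INVALID are always accepted,
 * everything else is checked against the plane's modifier list and the
 * per-format restrictions on D swizzles and DCC.
 */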
4923 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4924 					  uint32_t format,
4925 					  uint64_t modifier)
4926 {
4927 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4928 	const struct drm_format_info *info = drm_format_info(format);
4929 	int i;
4930 
4931 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4932 
4933 	if (!info)
4934 		return false;
4935 
4936 	/*
4937 	 * We always have to allow these modifiers:
4938 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4939 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4940 	 */
4941 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4942 	    modifier == DRM_FORMAT_MOD_INVALID) {
4943 		return true;
4944 	}
4945 
4946 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4947 	for (i = 0; i < plane->modifier_count; i++) {
4948 		if (modifier == plane->modifiers[i])
4949 			break;
4950 	}
4951 	if (i == plane->modifier_count)
4952 		return false;
4953 
4954 	/*
4955 	 * For D swizzle the canonical modifier depends on the bpp, so check
4956 	 * it here.
4957 	 */
4958 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4959 	    adev->family >= AMDGPU_FAMILY_NV) {
4960 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4961 			return false;
4962 	}
4963 
4964 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4965 	    info->cpp[0] < 8)
4966 		return false;
4967 
4968 	if (modifier_has_dcc(modifier)) {
4969 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4970 		if (info->cpp[0] != 4)
4971 			return false;
4972 		/* We support multi-planar formats, but not when combined with
4973 		 * additional DCC metadata planes. */
4974 		if (info->num_planes > 1)
4975 			return false;
4976 	}
4977 
4978 	return true;
4979 }
4980 
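/*
 * Append a modifier to a dynamically grown array, doubling the capacity as
 * needed; on allocation failure the array is freed and set to NULL.
 */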
4981 static void
4982 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4983 {
4984 	if (!*mods)
4985 		return;
4986 
4987 	if (*cap - *size < 1) {
4988 		uint64_t new_cap = *cap * 2;
4989 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4990 
4991 		if (!new_mods) {
4992 			kfree(*mods);
4993 			*mods = NULL;
4994 			return;
4995 		}
4996 
4997 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4998 		kfree(*mods);
4999 		*mods = new_mods;
5000 		*cap = new_cap;
5001 	}
5002 
5003 	(*mods)[*size] = mod;
5004 	*size += 1;
5005 }
5006 
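/*
 * Build the GFX9 modifier list; DCC constant-encode variants are only added
 * on Raven2 and later parts.
 */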
5007 static void
5008 add_gfx9_modifiers(const struct amdgpu_device *adev,
5009 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5010 {
5011 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5012 	int pipe_xor_bits = min(8, pipes +
5013 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5014 	int bank_xor_bits = min(8 - pipe_xor_bits,
5015 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5016 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5017 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5018 
5019 
5020 	if (adev->family == AMDGPU_FAMILY_RV) {
5021 		/* Raven2 and later */
5022 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5023 
5024 		/*
5025 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5026 		 * doesn't support _D on DCN
5027 		 */
5028 
5029 		if (has_constant_encode) {
5030 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5031 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5032 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5033 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5034 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5035 				    AMD_FMT_MOD_SET(DCC, 1) |
5036 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5037 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5038 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5039 		}
5040 
5041 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5042 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5043 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5044 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5045 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5046 			    AMD_FMT_MOD_SET(DCC, 1) |
5047 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5048 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5049 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5050 
5051 		if (has_constant_encode) {
5052 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5053 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5054 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5055 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5056 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5057 				    AMD_FMT_MOD_SET(DCC, 1) |
5058 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5059 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5060 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5061 
5062 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5063 				    AMD_FMT_MOD_SET(RB, rb) |
5064 				    AMD_FMT_MOD_SET(PIPE, pipes));
5065 		}
5066 
5067 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5068 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5069 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5070 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5071 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5072 			    AMD_FMT_MOD_SET(DCC, 1) |
5073 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5074 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5075 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5076 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5077 			    AMD_FMT_MOD_SET(RB, rb) |
5078 			    AMD_FMT_MOD_SET(PIPE, pipes));
5079 	}
5080 
5081 	/*
5082 	 * Only supported for 64bpp on Raven, will be filtered on format in
5083 	 * dm_plane_format_mod_supported.
5084 	 */
5085 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5086 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5087 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5088 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5089 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5090 
5091 	if (adev->family == AMDGPU_FAMILY_RV) {
5092 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5093 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5094 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5095 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5096 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5097 	}
5098 
5099 	/*
5100 	 * Only supported for 64bpp on Raven, will be filtered on format in
5101 	 * dm_plane_format_mod_supported.
5102 	 */
5103 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5104 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5105 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5106 
5107 	if (adev->family == AMDGPU_FAMILY_RV) {
5108 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5109 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5110 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5111 	}
5112 }
5113 
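/*
 * GFX10 (pre-RB+) modifier list: 64K_R_X swizzles with and without DCC
 * (including a retileable DCC variant), the 64K_S_X swizzle, and the non-X
 * GFX9 64K_D/64K_S fallbacks.
 */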
5114 static void
5115 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5116 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5117 {
5118 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5119 
5120 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5121 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5122 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5123 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5124 		    AMD_FMT_MOD_SET(DCC, 1) |
5125 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5126 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5127 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5128 
5129 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5130 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5131 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5132 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5133 		    AMD_FMT_MOD_SET(DCC, 1) |
5134 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5135 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5136 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5137 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5138 
5139 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5140 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5141 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5142 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5143 
5144 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5145 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5146 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5147 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5148 
5149 
5150 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5151 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5152 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5153 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5154 
5155 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5156 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5157 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5158 }
5159 
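/*
 * GFX10.3 (RB+) modifier list: 64K_R_X swizzles with DCC in 64B+128B and
 * 128B-only independent-block flavours, their retileable variants, the plain
 * 64K_R_X/64K_S_X swizzles, and the non-X GFX9 64K_D/64K_S fallbacks.
 */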
5160 static void
5161 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5162 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5163 {
5164 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5165 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5166 
5167 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5168 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5169 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5170 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5171 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5172 		    AMD_FMT_MOD_SET(DCC, 1) |
5173 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5174 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5175 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5176 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5177 
5178 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5179 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5180 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5181 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5182 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5183 		    AMD_FMT_MOD_SET(DCC, 1) |
5184 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5185 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5186 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5187 
5188 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5189 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5190 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5191 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5192 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5193 		    AMD_FMT_MOD_SET(DCC, 1) |
5194 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5195 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5196 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5197 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5198 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5199 
5200 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5201 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5202 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5203 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5204 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5205 		    AMD_FMT_MOD_SET(DCC, 1) |
5206 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5207 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5208 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5209 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5210 
5211 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5212 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5213 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5214 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5215 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5216 
5217 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5218 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5219 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5220 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5221 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5222 
5223 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5224 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5225 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5226 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5227 
5228 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5229 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5230 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5231 }
5232 
5233 static int
5234 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5235 {
5236 	uint64_t size = 0, capacity = 128;
5237 	*mods = NULL;
5238 
5239 	/* We have not hooked up any pre-GFX9 modifiers. */
5240 	if (adev->family < AMDGPU_FAMILY_AI)
5241 		return 0;
5242 
5243 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5244 
5245 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5246 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5247 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5248 		return *mods ? 0 : -ENOMEM;
5249 	}
5250 
5251 	switch (adev->family) {
5252 	case AMDGPU_FAMILY_AI:
5253 	case AMDGPU_FAMILY_RV:
5254 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5255 		break;
5256 	case AMDGPU_FAMILY_NV:
5257 	case AMDGPU_FAMILY_VGH:
5258 	case AMDGPU_FAMILY_YC:
5259 	case AMDGPU_FAMILY_GC_10_3_6:
5260 	case AMDGPU_FAMILY_GC_10_3_7:
5261 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5262 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5263 		else
5264 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5265 		break;
5266 	}
5267 
5268 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5269 
5270 	/* INVALID marks the end of the list. */
5271 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5272 
5273 	if (!*mods)
5274 		return -ENOMEM;
5275 
5276 	return 0;
5277 }
5278 
5279 static int
5280 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5281 					  const struct amdgpu_framebuffer *afb,
5282 					  const enum surface_pixel_format format,
5283 					  const enum dc_rotation_angle rotation,
5284 					  const struct plane_size *plane_size,
5285 					  union dc_tiling_info *tiling_info,
5286 					  struct dc_plane_dcc_param *dcc,
5287 					  struct dc_plane_address *address,
5288 					  const bool force_disable_dcc)
5289 {
5290 	const uint64_t modifier = afb->base.modifier;
5291 	int ret = 0;
5292 
5293 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5294 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5295 
5296 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5297 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5298 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5299 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5300 
5301 		dcc->enable = 1;
5302 		dcc->meta_pitch = afb->base.pitches[1];
5303 		dcc->independent_64b_blks = independent_64b_blks;
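		/*
		 * Translate the modifier's DCC_INDEPENDENT_64B/128B bits into
		 * the HUBP independent block constraint: RB+ (gfx10.3) parts
		 * distinguish the 128B and mixed 64B+128B cases, while older
		 * ASICs only know about 64B blocks.
		 */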
5304 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5305 			if (independent_64b_blks && independent_128b_blks)
5306 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5307 			else if (independent_128b_blks)
5308 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5309 			else if (independent_64b_blks && !independent_128b_blks)
5310 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5311 			else
5312 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5313 		} else {
5314 			if (independent_64b_blks)
5315 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5316 			else
5317 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5318 		}
5319 
5320 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5321 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5322 	}
5323 
5324 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5325 	if (ret)
5326 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5327 
5328 	return ret;
5329 }
5330 
5331 static int
5332 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5333 			     const struct amdgpu_framebuffer *afb,
5334 			     const enum surface_pixel_format format,
5335 			     const enum dc_rotation_angle rotation,
5336 			     const uint64_t tiling_flags,
5337 			     union dc_tiling_info *tiling_info,
5338 			     struct plane_size *plane_size,
5339 			     struct dc_plane_dcc_param *dcc,
5340 			     struct dc_plane_address *address,
5341 			     bool tmz_surface,
5342 			     bool force_disable_dcc)
5343 {
5344 	const struct drm_framebuffer *fb = &afb->base;
5345 	int ret;
5346 
5347 	memset(tiling_info, 0, sizeof(*tiling_info));
5348 	memset(plane_size, 0, sizeof(*plane_size));
5349 	memset(dcc, 0, sizeof(*dcc));
5350 	memset(address, 0, sizeof(*address));
5351 
5352 	address->tmz_surface = tmz_surface;
5353 
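	/*
	 * RGB formats are programmed as a single graphics plane; YUV (video)
	 * formats carry separate luma and chroma planes with their own
	 * addresses.
	 */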
5354 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5355 		uint64_t addr = afb->address + fb->offsets[0];
5356 
5357 		plane_size->surface_size.x = 0;
5358 		plane_size->surface_size.y = 0;
5359 		plane_size->surface_size.width = fb->width;
5360 		plane_size->surface_size.height = fb->height;
5361 		plane_size->surface_pitch =
5362 			fb->pitches[0] / fb->format->cpp[0];
5363 
5364 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5365 		address->grph.addr.low_part = lower_32_bits(addr);
5366 		address->grph.addr.high_part = upper_32_bits(addr);
5367 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5368 		uint64_t luma_addr = afb->address + fb->offsets[0];
5369 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5370 
5371 		plane_size->surface_size.x = 0;
5372 		plane_size->surface_size.y = 0;
5373 		plane_size->surface_size.width = fb->width;
5374 		plane_size->surface_size.height = fb->height;
5375 		plane_size->surface_pitch =
5376 			fb->pitches[0] / fb->format->cpp[0];
5377 
5378 		plane_size->chroma_size.x = 0;
5379 		plane_size->chroma_size.y = 0;
5380 		/* TODO: set these based on surface format */
5381 		plane_size->chroma_size.width = fb->width / 2;
5382 		plane_size->chroma_size.height = fb->height / 2;
5383 
5384 		plane_size->chroma_pitch =
5385 			fb->pitches[1] / fb->format->cpp[1];
5386 
5387 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5388 		address->video_progressive.luma_addr.low_part =
5389 			lower_32_bits(luma_addr);
5390 		address->video_progressive.luma_addr.high_part =
5391 			upper_32_bits(luma_addr);
5392 		address->video_progressive.chroma_addr.low_part =
5393 			lower_32_bits(chroma_addr);
5394 		address->video_progressive.chroma_addr.high_part =
5395 			upper_32_bits(chroma_addr);
5396 	}
5397 
5398 	if (adev->family >= AMDGPU_FAMILY_AI) {
5399 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5400 								rotation, plane_size,
5401 								tiling_info, dcc,
5402 								address,
5403 								force_disable_dcc);
5404 		if (ret)
5405 			return ret;
5406 	} else {
5407 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5408 	}
5409 
5410 	return 0;
5411 }
5412 
5413 static void
5414 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5415 			       bool *per_pixel_alpha, bool *global_alpha,
5416 			       int *global_alpha_value)
5417 {
5418 	*per_pixel_alpha = false;
5419 	*global_alpha = false;
5420 	*global_alpha_value = 0xff;
5421 
5422 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5423 		return;
5424 
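	/*
	 * Per-pixel alpha is only honoured for premultiplied blending on
	 * formats that actually carry an alpha channel; plane-wide alpha is
	 * scaled from DRM's 16-bit property to DC's 8-bit value below.
	 */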
5425 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5426 		static const uint32_t alpha_formats[] = {
5427 			DRM_FORMAT_ARGB8888,
5428 			DRM_FORMAT_RGBA8888,
5429 			DRM_FORMAT_ABGR8888,
5430 		};
5431 		uint32_t format = plane_state->fb->format->format;
5432 		unsigned int i;
5433 
5434 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5435 			if (format == alpha_formats[i]) {
5436 				*per_pixel_alpha = true;
5437 				break;
5438 			}
5439 		}
5440 	}
5441 
5442 	if (plane_state->alpha < 0xffff) {
5443 		*global_alpha = true;
5444 		*global_alpha_value = plane_state->alpha >> 8;
5445 	}
5446 }
5447 
5448 static int
5449 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5450 			    const enum surface_pixel_format format,
5451 			    enum dc_color_space *color_space)
5452 {
5453 	bool full_range;
5454 
5455 	*color_space = COLOR_SPACE_SRGB;
5456 
5457 	/* DRM color properties only affect non-RGB formats. */
5458 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5459 		return 0;
5460 
5461 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5462 
5463 	switch (plane_state->color_encoding) {
5464 	case DRM_COLOR_YCBCR_BT601:
5465 		if (full_range)
5466 			*color_space = COLOR_SPACE_YCBCR601;
5467 		else
5468 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5469 		break;
5470 
5471 	case DRM_COLOR_YCBCR_BT709:
5472 		if (full_range)
5473 			*color_space = COLOR_SPACE_YCBCR709;
5474 		else
5475 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5476 		break;
5477 
5478 	case DRM_COLOR_YCBCR_BT2020:
5479 		if (full_range)
5480 			*color_space = COLOR_SPACE_2020_YCBCR;
5481 		else
5482 			return -EINVAL;
5483 		break;
5484 
5485 	default:
5486 		return -EINVAL;
5487 	}
5488 
5489 	return 0;
5490 }
5491 
5492 static int
5493 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5494 			    const struct drm_plane_state *plane_state,
5495 			    const uint64_t tiling_flags,
5496 			    struct dc_plane_info *plane_info,
5497 			    struct dc_plane_address *address,
5498 			    bool tmz_surface,
5499 			    bool force_disable_dcc)
5500 {
5501 	const struct drm_framebuffer *fb = plane_state->fb;
5502 	const struct amdgpu_framebuffer *afb =
5503 		to_amdgpu_framebuffer(plane_state->fb);
5504 	int ret;
5505 
5506 	memset(plane_info, 0, sizeof(*plane_info));
5507 
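	/* Translate the DRM fourcc into DC's surface pixel format. */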
5508 	switch (fb->format->format) {
5509 	case DRM_FORMAT_C8:
5510 		plane_info->format =
5511 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5512 		break;
5513 	case DRM_FORMAT_RGB565:
5514 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5515 		break;
5516 	case DRM_FORMAT_XRGB8888:
5517 	case DRM_FORMAT_ARGB8888:
5518 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5519 		break;
5520 	case DRM_FORMAT_XRGB2101010:
5521 	case DRM_FORMAT_ARGB2101010:
5522 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5523 		break;
5524 	case DRM_FORMAT_XBGR2101010:
5525 	case DRM_FORMAT_ABGR2101010:
5526 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5527 		break;
5528 	case DRM_FORMAT_XBGR8888:
5529 	case DRM_FORMAT_ABGR8888:
5530 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5531 		break;
5532 	case DRM_FORMAT_NV21:
5533 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5534 		break;
5535 	case DRM_FORMAT_NV12:
5536 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5537 		break;
5538 	case DRM_FORMAT_P010:
5539 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5540 		break;
5541 	case DRM_FORMAT_XRGB16161616F:
5542 	case DRM_FORMAT_ARGB16161616F:
5543 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5544 		break;
5545 	case DRM_FORMAT_XBGR16161616F:
5546 	case DRM_FORMAT_ABGR16161616F:
5547 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5548 		break;
5549 	case DRM_FORMAT_XRGB16161616:
5550 	case DRM_FORMAT_ARGB16161616:
5551 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5552 		break;
5553 	case DRM_FORMAT_XBGR16161616:
5554 	case DRM_FORMAT_ABGR16161616:
5555 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5556 		break;
5557 	default:
5558 		DRM_ERROR(
5559 			"Unsupported screen format %p4cc\n",
5560 			&fb->format->format);
5561 		return -EINVAL;
5562 	}
5563 
5564 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5565 	case DRM_MODE_ROTATE_0:
5566 		plane_info->rotation = ROTATION_ANGLE_0;
5567 		break;
5568 	case DRM_MODE_ROTATE_90:
5569 		plane_info->rotation = ROTATION_ANGLE_90;
5570 		break;
5571 	case DRM_MODE_ROTATE_180:
5572 		plane_info->rotation = ROTATION_ANGLE_180;
5573 		break;
5574 	case DRM_MODE_ROTATE_270:
5575 		plane_info->rotation = ROTATION_ANGLE_270;
5576 		break;
5577 	default:
5578 		plane_info->rotation = ROTATION_ANGLE_0;
5579 		break;
5580 	}
5581 
5582 	plane_info->visible = true;
5583 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5584 
5585 	plane_info->layer_index = 0;
5586 
5587 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5588 					  &plane_info->color_space);
5589 	if (ret)
5590 		return ret;
5591 
5592 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5593 					   plane_info->rotation, tiling_flags,
5594 					   &plane_info->tiling_info,
5595 					   &plane_info->plane_size,
5596 					   &plane_info->dcc, address, tmz_surface,
5597 					   force_disable_dcc);
5598 	if (ret)
5599 		return ret;
5600 
5601 	fill_blending_from_plane_state(
5602 		plane_state, &plane_info->per_pixel_alpha,
5603 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5604 
5605 	return 0;
5606 }
5607 
5608 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5609 				    struct dc_plane_state *dc_plane_state,
5610 				    struct drm_plane_state *plane_state,
5611 				    struct drm_crtc_state *crtc_state)
5612 {
5613 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5614 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5615 	struct dc_scaling_info scaling_info;
5616 	struct dc_plane_info plane_info;
5617 	int ret;
5618 	bool force_disable_dcc = false;
5619 
5620 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5621 	if (ret)
5622 		return ret;
5623 
5624 	dc_plane_state->src_rect = scaling_info.src_rect;
5625 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5626 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5627 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5628 
5629 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5630 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5631 					  afb->tiling_flags,
5632 					  &plane_info,
5633 					  &dc_plane_state->address,
5634 					  afb->tmz_surface,
5635 					  force_disable_dcc);
5636 	if (ret)
5637 		return ret;
5638 
5639 	dc_plane_state->format = plane_info.format;
5640 	dc_plane_state->color_space = plane_info.color_space;
5642 	dc_plane_state->plane_size = plane_info.plane_size;
5643 	dc_plane_state->rotation = plane_info.rotation;
5644 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5645 	dc_plane_state->stereo_format = plane_info.stereo_format;
5646 	dc_plane_state->tiling_info = plane_info.tiling_info;
5647 	dc_plane_state->visible = plane_info.visible;
5648 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5649 	dc_plane_state->global_alpha = plane_info.global_alpha;
5650 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5651 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
5653 	dc_plane_state->flip_int_enabled = true;
5654 
5655 	/*
5656 	 * Always set input transfer function, since plane state is refreshed
5657 	 * every time.
5658 	 */
5659 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5660 	if (ret)
5661 		return ret;
5662 
5663 	return 0;
5664 }
5665 
5666 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5667 					   const struct dm_connector_state *dm_state,
5668 					   struct dc_stream_state *stream)
5669 {
5670 	enum amdgpu_rmx_type rmx_type;
5671 
	struct rect src = { 0 }; /* viewport in composition space */
5673 	struct rect dst = { 0 }; /* stream addressable area */
5674 
5675 	/* no mode. nothing to be done */
5676 	if (!mode)
5677 		return;
5678 
5679 	/* Full screen scaling by default */
5680 	src.width = mode->hdisplay;
5681 	src.height = mode->vdisplay;
5682 	dst.width = stream->timing.h_addressable;
5683 	dst.height = stream->timing.v_addressable;
5684 
5685 	if (dm_state) {
5686 		rmx_type = dm_state->scaling;
5687 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5688 			if (src.width * dst.height <
5689 					src.height * dst.width) {
5690 				/* height needs less upscaling/more downscaling */
5691 				dst.width = src.width *
5692 						dst.height / src.height;
5693 			} else {
5694 				/* width needs less upscaling/more downscaling */
5695 				dst.height = src.height *
5696 						dst.width / src.width;
5697 			}
5698 		} else if (rmx_type == RMX_CENTER) {
5699 			dst = src;
5700 		}
5701 
5702 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5703 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5704 
5705 		if (dm_state->underscan_enable) {
5706 			dst.x += dm_state->underscan_hborder / 2;
5707 			dst.y += dm_state->underscan_vborder / 2;
5708 			dst.width -= dm_state->underscan_hborder;
5709 			dst.height -= dm_state->underscan_vborder;
5710 		}
5711 	}
5712 
5713 	stream->src = src;
5714 	stream->dst = dst;
5715 
5716 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5717 		      dst.x, dst.y, dst.width, dst.height);
5718 
5719 }
5720 
5721 static enum dc_color_depth
5722 convert_color_depth_from_display_info(const struct drm_connector *connector,
5723 				      bool is_y420, int requested_bpc)
5724 {
5725 	uint8_t bpc;
5726 
5727 	if (is_y420) {
5728 		bpc = 8;
5729 
5730 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5731 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5732 			bpc = 16;
5733 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5734 			bpc = 12;
5735 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5736 			bpc = 10;
5737 	} else {
5738 		bpc = (uint8_t)connector->display_info.bpc;
5739 		/* Assume 8 bpc by default if no bpc is specified. */
5740 		bpc = bpc ? bpc : 8;
5741 	}
5742 
5743 	if (requested_bpc > 0) {
5744 		/*
5745 		 * Cap display bpc based on the user requested value.
5746 		 *
		 * The value for state->max_bpc may not be correctly updated
5748 		 * depending on when the connector gets added to the state
5749 		 * or if this was called outside of atomic check, so it
5750 		 * can't be used directly.
5751 		 */
5752 		bpc = min_t(u8, bpc, requested_bpc);
5753 
5754 		/* Round down to the nearest even number. */
5755 		bpc = bpc - (bpc & 1);
5756 	}
5757 
5758 	switch (bpc) {
5759 	case 0:
5760 		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
5763 		 * TODO: Fix edid parsing
5764 		 */
5765 		return COLOR_DEPTH_888;
5766 	case 6:
5767 		return COLOR_DEPTH_666;
5768 	case 8:
5769 		return COLOR_DEPTH_888;
5770 	case 10:
5771 		return COLOR_DEPTH_101010;
5772 	case 12:
5773 		return COLOR_DEPTH_121212;
5774 	case 14:
5775 		return COLOR_DEPTH_141414;
5776 	case 16:
5777 		return COLOR_DEPTH_161616;
5778 	default:
5779 		return COLOR_DEPTH_UNDEFINED;
5780 	}
5781 }
5782 
5783 static enum dc_aspect_ratio
5784 get_aspect_ratio(const struct drm_display_mode *mode_in)
5785 {
5786 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5787 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5788 }
5789 
5790 static enum dc_color_space
5791 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5792 {
5793 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5794 
5795 	switch (dc_crtc_timing->pixel_encoding)	{
5796 	case PIXEL_ENCODING_YCBCR422:
5797 	case PIXEL_ENCODING_YCBCR444:
5798 	case PIXEL_ENCODING_YCBCR420:
5799 	{
5800 		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
5804 		 */
5805 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5806 			if (dc_crtc_timing->flags.Y_ONLY)
5807 				color_space =
5808 					COLOR_SPACE_YCBCR709_LIMITED;
5809 			else
5810 				color_space = COLOR_SPACE_YCBCR709;
5811 		} else {
5812 			if (dc_crtc_timing->flags.Y_ONLY)
5813 				color_space =
5814 					COLOR_SPACE_YCBCR601_LIMITED;
5815 			else
5816 				color_space = COLOR_SPACE_YCBCR601;
5817 		}
5818 
5819 	}
5820 	break;
5821 	case PIXEL_ENCODING_RGB:
5822 		color_space = COLOR_SPACE_SRGB;
5823 		break;
5824 
5825 	default:
5826 		WARN_ON(1);
5827 		break;
5828 	}
5829 
5830 	return color_space;
5831 }
5832 
5833 static bool adjust_colour_depth_from_display_info(
5834 	struct dc_crtc_timing *timing_out,
5835 	const struct drm_display_info *info)
5836 {
5837 	enum dc_color_depth depth = timing_out->display_color_depth;
5838 	int normalized_clk;
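	/*
	 * Step the colour depth down until the depth-scaled pixel clock fits
	 * within the sink's maximum TMDS clock.
	 */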
5839 	do {
5840 		normalized_clk = timing_out->pix_clk_100hz / 10;
5841 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5842 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5843 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on colour depth */
5845 		switch (depth) {
5846 		case COLOR_DEPTH_888:
5847 			break;
5848 		case COLOR_DEPTH_101010:
5849 			normalized_clk = (normalized_clk * 30) / 24;
5850 			break;
5851 		case COLOR_DEPTH_121212:
5852 			normalized_clk = (normalized_clk * 36) / 24;
5853 			break;
5854 		case COLOR_DEPTH_161616:
5855 			normalized_clk = (normalized_clk * 48) / 24;
5856 			break;
5857 		default:
5858 			/* The above depths are the only ones valid for HDMI. */
5859 			return false;
5860 		}
5861 		if (normalized_clk <= info->max_tmds_clock) {
5862 			timing_out->display_color_depth = depth;
5863 			return true;
5864 		}
5865 	} while (--depth > COLOR_DEPTH_666);
5866 	return false;
5867 }
5868 
5869 static void fill_stream_properties_from_drm_display_mode(
5870 	struct dc_stream_state *stream,
5871 	const struct drm_display_mode *mode_in,
5872 	const struct drm_connector *connector,
5873 	const struct drm_connector_state *connector_state,
5874 	const struct dc_stream_state *old_stream,
5875 	int requested_bpc)
5876 {
5877 	struct dc_crtc_timing *timing_out = &stream->timing;
5878 	const struct drm_display_info *info = &connector->display_info;
5879 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5880 	struct hdmi_vendor_infoframe hv_frame;
5881 	struct hdmi_avi_infoframe avi_frame;
5882 
5883 	memset(&hv_frame, 0, sizeof(hv_frame));
5884 	memset(&avi_frame, 0, sizeof(avi_frame));
5885 
5886 	timing_out->h_border_left = 0;
5887 	timing_out->h_border_right = 0;
5888 	timing_out->v_border_top = 0;
5889 	timing_out->v_border_bottom = 0;
5890 	/* TODO: un-hardcode */
5891 	if (drm_mode_is_420_only(info, mode_in)
5892 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5893 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5894 	else if (drm_mode_is_420_also(info, mode_in)
5895 			&& aconnector->force_yuv420_output)
5896 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5897 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5898 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5899 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5900 	else
5901 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5902 
5903 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5904 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5905 		connector,
5906 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5907 		requested_bpc);
5908 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5909 	timing_out->hdmi_vic = 0;
5910 
	if (old_stream) {
5912 		timing_out->vic = old_stream->timing.vic;
5913 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5914 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5915 	} else {
5916 		timing_out->vic = drm_match_cea_mode(mode_in);
5917 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5918 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5919 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5920 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5921 	}
5922 
5923 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5924 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5925 		timing_out->vic = avi_frame.video_code;
5926 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5927 		timing_out->hdmi_vic = hv_frame.vic;
5928 	}
5929 
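	/*
	 * FreeSync video modes take their timing from the mode's nominal
	 * values; everything else uses the CRTC-adjusted (crtc_*) values.
	 */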
5930 	if (is_freesync_video_mode(mode_in, aconnector)) {
5931 		timing_out->h_addressable = mode_in->hdisplay;
5932 		timing_out->h_total = mode_in->htotal;
5933 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5934 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5935 		timing_out->v_total = mode_in->vtotal;
5936 		timing_out->v_addressable = mode_in->vdisplay;
5937 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5938 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5939 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5940 	} else {
5941 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5942 		timing_out->h_total = mode_in->crtc_htotal;
5943 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5944 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5945 		timing_out->v_total = mode_in->crtc_vtotal;
5946 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5947 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5948 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5949 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5950 	}
5951 
5952 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5953 
5954 	stream->output_color_space = get_output_color_space(timing_out);
5955 
5956 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5957 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5958 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5959 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5960 		    drm_mode_is_420_also(info, mode_in) &&
5961 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5962 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5963 			adjust_colour_depth_from_display_info(timing_out, info);
5964 		}
5965 	}
5966 }
5967 
5968 static void fill_audio_info(struct audio_info *audio_info,
5969 			    const struct drm_connector *drm_connector,
5970 			    const struct dc_sink *dc_sink)
5971 {
5972 	int i = 0;
5973 	int cea_revision = 0;
5974 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5975 
5976 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5977 	audio_info->product_id = edid_caps->product_id;
5978 
5979 	cea_revision = drm_connector->display_info.cea_rev;
5980 
5981 	strscpy(audio_info->display_name,
5982 		edid_caps->display_name,
5983 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5984 
5985 	if (cea_revision >= 3) {
5986 		audio_info->mode_count = edid_caps->audio_mode_count;
5987 
5988 		for (i = 0; i < audio_info->mode_count; ++i) {
5989 			audio_info->modes[i].format_code =
5990 					(enum audio_format_code)
5991 					(edid_caps->audio_modes[i].format_code);
5992 			audio_info->modes[i].channel_count =
5993 					edid_caps->audio_modes[i].channel_count;
5994 			audio_info->modes[i].sample_rates.all =
5995 					edid_caps->audio_modes[i].sample_rate;
5996 			audio_info->modes[i].sample_size =
5997 					edid_caps->audio_modes[i].sample_size;
5998 		}
5999 	}
6000 
6001 	audio_info->flags.all = edid_caps->speaker_flags;
6002 
6003 	/* TODO: We only check for the progressive mode, check for interlace mode too */
6004 	if (drm_connector->latency_present[0]) {
6005 		audio_info->video_latency = drm_connector->video_latency[0];
6006 		audio_info->audio_latency = drm_connector->audio_latency[0];
6007 	}
6008 
6009 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6010 
6011 }
6012 
6013 static void
6014 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6015 				      struct drm_display_mode *dst_mode)
6016 {
6017 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6018 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6019 	dst_mode->crtc_clock = src_mode->crtc_clock;
6020 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6021 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6022 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6023 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6024 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6025 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6026 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6027 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6028 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6029 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6030 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6031 }
6032 
6033 static void
6034 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6035 					const struct drm_display_mode *native_mode,
6036 					bool scale_enabled)
6037 {
6038 	if (scale_enabled) {
6039 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6040 	} else if (native_mode->clock == drm_mode->clock &&
6041 			native_mode->htotal == drm_mode->htotal &&
6042 			native_mode->vtotal == drm_mode->vtotal) {
6043 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6044 	} else {
		/* no scaling and no amdgpu-inserted mode, nothing to patch */
6046 	}
6047 }
6048 
6049 static struct dc_sink *
6050 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6051 {
6052 	struct dc_sink_init_data sink_init_data = { 0 };
6053 	struct dc_sink *sink = NULL;
6054 	sink_init_data.link = aconnector->dc_link;
6055 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6056 
6057 	sink = dc_sink_create(&sink_init_data);
6058 	if (!sink) {
6059 		DRM_ERROR("Failed to create sink!\n");
6060 		return NULL;
6061 	}
6062 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6063 
6064 	return sink;
6065 }
6066 
6067 static void set_multisync_trigger_params(
6068 		struct dc_stream_state *stream)
6069 {
6070 	struct dc_stream_state *master = NULL;
6071 
6072 	if (stream->triggered_crtc_reset.enabled) {
6073 		master = stream->triggered_crtc_reset.event_source;
6074 		stream->triggered_crtc_reset.event =
6075 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6076 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6077 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6078 	}
6079 }
6080 
6081 static void set_master_stream(struct dc_stream_state *stream_set[],
6082 			      int stream_count)
6083 {
6084 	int j, highest_rfr = 0, master_stream = 0;
6085 
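	/*
	 * Pick the stream with the highest refresh rate as the multisync
	 * master; all streams then use it as their trigger event source.
	 */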
6086 	for (j = 0;  j < stream_count; j++) {
6087 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6088 			int refresh_rate = 0;
6089 
6090 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6091 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6092 			if (refresh_rate > highest_rfr) {
6093 				highest_rfr = refresh_rate;
6094 				master_stream = j;
6095 			}
6096 		}
6097 	}
6098 	for (j = 0;  j < stream_count; j++) {
6099 		if (stream_set[j])
6100 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6101 	}
6102 }
6103 
6104 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6105 {
6106 	int i = 0;
6107 	struct dc_stream_state *stream;
6108 
6109 	if (context->stream_count < 2)
6110 		return;
6111 	for (i = 0; i < context->stream_count ; i++) {
6112 		if (!context->streams[i])
6113 			continue;
6114 		/*
6115 		 * TODO: add a function to read AMD VSDB bits and set
6116 		 * crtc_sync_master.multi_sync_enabled flag
6117 		 * For now it's set to false
6118 		 */
6119 	}
6120 
6121 	set_master_stream(context->streams, context->stream_count);
6122 
6123 	for (i = 0; i < context->stream_count ; i++) {
6124 		stream = context->streams[i];
6125 
6126 		if (!stream)
6127 			continue;
6128 
6129 		set_multisync_trigger_params(stream);
6130 	}
6131 }
6132 
6133 #if defined(CONFIG_DRM_AMD_DC_DCN)
6134 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6135 							struct dc_sink *sink, struct dc_stream_state *stream,
6136 							struct dsc_dec_dpcd_caps *dsc_caps)
6137 {
6138 	stream->timing.flags.DSC = 0;
6139 	dsc_caps->is_dsc_supported = false;
6140 
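	/*
	 * DSC caps are only parsed for DP/eDP sinks that are driven directly
	 * or through a DP-to-HDMI converter.
	 */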
6141 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6142 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6143 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6144 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6145 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6146 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6147 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6148 				dsc_caps);
6149 	}
6150 }
6151 
6152 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6153 				    struct dc_sink *sink, struct dc_stream_state *stream,
6154 				    struct dsc_dec_dpcd_caps *dsc_caps,
6155 				    uint32_t max_dsc_target_bpp_limit_override)
6156 {
6157 	const struct dc_link_settings *verified_link_cap = NULL;
6158 	uint32_t link_bw_in_kbps;
6159 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6160 	struct dc *dc = sink->ctx->dc;
6161 	struct dc_dsc_bw_range bw_range = {0};
6162 	struct dc_dsc_config dsc_cfg = {0};
6163 
6164 	verified_link_cap = dc_link_get_link_cap(stream->link);
6165 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6166 	edp_min_bpp_x16 = 8 * 16;
6167 	edp_max_bpp_x16 = 8 * 16;
6168 
6169 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6170 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6171 
6172 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6173 		edp_min_bpp_x16 = edp_max_bpp_x16;
6174 
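	/*
	 * If the stream compressed at the fixed eDP bpp already fits within
	 * the link bandwidth, program DSC at that bpp; otherwise fall back to
	 * a DSC config that targets the full link bandwidth.
	 */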
6175 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6176 				dc->debug.dsc_min_slice_height_override,
6177 				edp_min_bpp_x16, edp_max_bpp_x16,
6178 				dsc_caps,
6179 				&stream->timing,
6180 				&bw_range)) {
6181 
6182 		if (bw_range.max_kbps < link_bw_in_kbps) {
6183 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6184 					dsc_caps,
6185 					dc->debug.dsc_min_slice_height_override,
6186 					max_dsc_target_bpp_limit_override,
6187 					0,
6188 					&stream->timing,
6189 					&dsc_cfg)) {
6190 				stream->timing.dsc_cfg = dsc_cfg;
6191 				stream->timing.flags.DSC = 1;
6192 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6193 			}
6194 			return;
6195 		}
6196 	}
6197 
6198 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6199 				dsc_caps,
6200 				dc->debug.dsc_min_slice_height_override,
6201 				max_dsc_target_bpp_limit_override,
6202 				link_bw_in_kbps,
6203 				&stream->timing,
6204 				&dsc_cfg)) {
6205 		stream->timing.dsc_cfg = dsc_cfg;
6206 		stream->timing.flags.DSC = 1;
6207 	}
6208 }
6209 
6210 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6211 										struct dc_sink *sink, struct dc_stream_state *stream,
6212 										struct dsc_dec_dpcd_caps *dsc_caps)
6213 {
6214 	struct drm_connector *drm_connector = &aconnector->base;
6215 	uint32_t link_bandwidth_kbps;
6216 	uint32_t max_dsc_target_bpp_limit_override = 0;
6217 	struct dc *dc = sink->ctx->dc;
6218 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6219 	uint32_t dsc_max_supported_bw_in_kbps;
6220 
6221 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6222 							dc_link_get_link_cap(aconnector->dc_link));
6223 
6224 	if (stream->link && stream->link->local_sink)
6225 		max_dsc_target_bpp_limit_override =
6226 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6227 
6228 	/* Set DSC policy according to dsc_clock_en */
6229 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6230 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6231 
6232 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6233 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6234 
6235 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6236 
6237 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6238 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6239 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6240 						dsc_caps,
6241 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6242 						max_dsc_target_bpp_limit_override,
6243 						link_bandwidth_kbps,
6244 						&stream->timing,
6245 						&stream->timing.dsc_cfg)) {
6246 				stream->timing.flags.DSC = 1;
6247 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6248 								 __func__, drm_connector->name);
6249 			}
6250 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6251 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6252 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6253 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6254 
6255 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6256 					max_supported_bw_in_kbps > 0 &&
6257 					dsc_max_supported_bw_in_kbps > 0)
6258 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6259 						dsc_caps,
6260 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6261 						max_dsc_target_bpp_limit_override,
6262 						dsc_max_supported_bw_in_kbps,
6263 						&stream->timing,
6264 						&stream->timing.dsc_cfg)) {
6265 					stream->timing.flags.DSC = 1;
6266 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6267 									 __func__, drm_connector->name);
6268 				}
6269 		}
6270 	}
6271 
6272 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6273 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6274 		stream->timing.flags.DSC = 1;
6275 
6276 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6277 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6278 
6279 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6280 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6281 
6282 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6283 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6284 }
6285 #endif /* CONFIG_DRM_AMD_DC_DCN */
6286 
6287 /**
6288  * DOC: FreeSync Video
6289  *
6290  * When a userspace application wants to play a video, the content follows a
6291  * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
6294  *
6295  * - TV/NTSC (23.976 FPS)
6296  * - Cinema (24 FPS)
6297  * - TV/PAL (25 FPS)
6298  * - TV/NTSC (29.97 FPS)
6299  * - TV/NTSC (30 FPS)
6300  * - Cinema HFR (48 FPS)
6301  * - TV/PAL (50 FPS)
6302  * - Commonly used (60 FPS)
6303  * - Multiples of 24 (48,72,96,120 FPS)
6304  *
 * The list of standard video formats is not huge and can be added to the
 * connector's mode list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
6308  * rate. Such a switch will happen seamlessly, without screen blanking or
6309  * reprogramming of the output in any other way. If the userspace requests a
6310  * modesetting change compatible with FreeSync modes that only differ in the
6311  * refresh rate, DC will skip the full update and avoid blink during the
6312  * transition. For example, the video player can change the modesetting from
6313  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6314  * causing any display blink. This same concept can be applied to a mode
6315  * setting change.
6316  */
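/*
 * Return the mode with the highest refresh rate at the preferred mode's
 * resolution. The result is cached in freesync_vid_base so later lookups use
 * a stable baseline.
 */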
6317 static struct drm_display_mode *
6318 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6319 			  bool use_probed_modes)
6320 {
6321 	struct drm_display_mode *m, *m_pref = NULL;
6322 	u16 current_refresh, highest_refresh;
6323 	struct list_head *list_head = use_probed_modes ?
6324 						    &aconnector->base.probed_modes :
6325 						    &aconnector->base.modes;
6326 
6327 	if (aconnector->freesync_vid_base.clock != 0)
6328 		return &aconnector->freesync_vid_base;
6329 
6330 	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
6332 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6333 			m_pref = m;
6334 			break;
6335 		}
6336 	}
6337 
6338 	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6340 		m_pref = list_first_entry_or_null(
6341 			&aconnector->base.modes, struct drm_display_mode, head);
6342 		if (!m_pref) {
6343 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6344 			return NULL;
6345 		}
6346 	}
6347 
6348 	highest_refresh = drm_mode_vrefresh(m_pref);
6349 
6350 	/*
6351 	 * Find the mode with highest refresh rate with same resolution.
6352 	 * For some monitors, preferred mode is not the mode with highest
6353 	 * supported refresh rate.
6354 	 */
	list_for_each_entry(m, list_head, head) {
6356 		current_refresh  = drm_mode_vrefresh(m);
6357 
6358 		if (m->hdisplay == m_pref->hdisplay &&
6359 		    m->vdisplay == m_pref->vdisplay &&
6360 		    highest_refresh < current_refresh) {
6361 			highest_refresh = current_refresh;
6362 			m_pref = m;
6363 		}
6364 	}
6365 
6366 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6367 	return m_pref;
6368 }
6369 
6370 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6371 				   struct amdgpu_dm_connector *aconnector)
6372 {
6373 	struct drm_display_mode *high_mode;
6374 	int timing_diff;
6375 
6376 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6377 	if (!high_mode || !mode)
6378 		return false;
6379 
6380 	timing_diff = high_mode->vtotal - mode->vtotal;
6381 
6382 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6383 	    high_mode->hdisplay != mode->hdisplay ||
6384 	    high_mode->vdisplay != mode->vdisplay ||
6385 	    high_mode->hsync_start != mode->hsync_start ||
6386 	    high_mode->hsync_end != mode->hsync_end ||
6387 	    high_mode->htotal != mode->htotal ||
6388 	    high_mode->hskew != mode->hskew ||
6389 	    high_mode->vscan != mode->vscan ||
6390 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6391 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6392 		return false;
6393 	else
6394 		return true;
6395 }
6396 
6397 static struct dc_stream_state *
6398 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6399 		       const struct drm_display_mode *drm_mode,
6400 		       const struct dm_connector_state *dm_state,
6401 		       const struct dc_stream_state *old_stream,
6402 		       int requested_bpc)
6403 {
6404 	struct drm_display_mode *preferred_mode = NULL;
6405 	struct drm_connector *drm_connector;
6406 	const struct drm_connector_state *con_state =
6407 		dm_state ? &dm_state->base : NULL;
6408 	struct dc_stream_state *stream = NULL;
6409 	struct drm_display_mode mode = *drm_mode;
6410 	struct drm_display_mode saved_mode;
6411 	struct drm_display_mode *freesync_mode = NULL;
6412 	bool native_mode_found = false;
6413 	bool recalculate_timing = false;
6414 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6415 	int mode_refresh;
6416 	int preferred_refresh = 0;
6417 #if defined(CONFIG_DRM_AMD_DC_DCN)
6418 	struct dsc_dec_dpcd_caps dsc_caps;
6419 #endif
6420 	struct dc_sink *sink = NULL;
6421 
6422 	memset(&saved_mode, 0, sizeof(saved_mode));
6423 
6424 	if (aconnector == NULL) {
6425 		DRM_ERROR("aconnector is NULL!\n");
6426 		return stream;
6427 	}
6428 
6429 	drm_connector = &aconnector->base;
6430 
6431 	if (!aconnector->dc_sink) {
6432 		sink = create_fake_sink(aconnector);
6433 		if (!sink)
6434 			return stream;
6435 	} else {
6436 		sink = aconnector->dc_sink;
6437 		dc_sink_retain(sink);
6438 	}
6439 
6440 	stream = dc_create_stream_for_sink(sink);
6441 
6442 	if (stream == NULL) {
6443 		DRM_ERROR("Failed to create stream for sink!\n");
6444 		goto finish;
6445 	}
6446 
6447 	stream->dm_stream_context = aconnector;
6448 
6449 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6450 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6451 
6452 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6453 		/* Search for preferred mode */
6454 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6455 			native_mode_found = true;
6456 			break;
6457 		}
6458 	}
6459 	if (!native_mode_found)
6460 		preferred_mode = list_first_entry_or_null(
6461 				&aconnector->base.modes,
6462 				struct drm_display_mode,
6463 				head);
6464 
6465 	mode_refresh = drm_mode_vrefresh(&mode);
6466 
6467 	if (preferred_mode == NULL) {
6468 		/*
6469 		 * This may not be an error, the use case is when we have no
6470 		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode,
		 * and the mode list may not yet be filled in.
6473 		 */
6474 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6475 	} else {
6476 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6477 		if (recalculate_timing) {
6478 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6479 			drm_mode_copy(&saved_mode, &mode);
6480 			drm_mode_copy(&mode, freesync_mode);
6481 		} else {
6482 			decide_crtc_timing_for_drm_display_mode(
6483 				&mode, preferred_mode, scale);
6484 
6485 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6486 		}
6487 	}
6488 
6489 	if (recalculate_timing)
6490 		drm_mode_set_crtcinfo(&saved_mode, 0);
6491 	else if (!dm_state)
6492 		drm_mode_set_crtcinfo(&mode, 0);
6493 
	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
6498 	if (!scale || mode_refresh != preferred_refresh)
6499 		fill_stream_properties_from_drm_display_mode(
6500 			stream, &mode, &aconnector->base, con_state, NULL,
6501 			requested_bpc);
6502 	else
6503 		fill_stream_properties_from_drm_display_mode(
6504 			stream, &mode, &aconnector->base, con_state, old_stream,
6505 			requested_bpc);
6506 
6507 #if defined(CONFIG_DRM_AMD_DC_DCN)
6508 	/* SST DSC determination policy */
6509 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6510 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6511 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6512 #endif
6513 
6514 	update_stream_scaling_settings(&mode, dm_state, stream);
6515 
6516 	fill_audio_info(
6517 		&stream->audio_info,
6518 		drm_connector,
6519 		sink);
6520 
6521 	update_stream_signal(stream, sink);
6522 
6523 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6524 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6525 
6526 	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
6531 		stream->use_vsc_sdp_for_colorimetry = false;
6532 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6533 			stream->use_vsc_sdp_for_colorimetry =
6534 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6535 		} else {
6536 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6537 				stream->use_vsc_sdp_for_colorimetry = true;
6538 		}
6539 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6540 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6541 
6542 	}
6543 finish:
6544 	dc_sink_release(sink);
6545 
6546 	return stream;
6547 }
6548 
6549 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6550 {
6551 	drm_crtc_cleanup(crtc);
6552 	kfree(crtc);
6553 }
6554 
6555 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6556 				  struct drm_crtc_state *state)
6557 {
6558 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6559 
	/* TODO: destroy dc_stream objects once the stream object is flattened */
6561 	if (cur->stream)
6562 		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

6568 	kfree(state);
6569 }
6570 
6571 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6572 {
6573 	struct dm_crtc_state *state;
6574 
6575 	if (crtc->state)
6576 		dm_crtc_destroy_state(crtc, crtc->state);
6577 
6578 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6579 	if (WARN_ON(!state))
6580 		return;
6581 
6582 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6583 }
6584 
6585 static struct drm_crtc_state *
6586 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6587 {
6588 	struct dm_crtc_state *state, *cur;
6589 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
6594 
6595 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6596 	if (!state)
6597 		return NULL;
6598 
6599 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6600 
6601 	if (cur->stream) {
6602 		state->stream = cur->stream;
6603 		dc_stream_retain(state->stream);
6604 	}
6605 
6606 	state->active_planes = cur->active_planes;
6607 	state->vrr_infopacket = cur->vrr_infopacket;
6608 	state->abm_level = cur->abm_level;
6609 	state->vrr_supported = cur->vrr_supported;
6610 	state->freesync_config = cur->freesync_config;
6611 	state->cm_has_degamma = cur->cm_has_degamma;
6612 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6613 	state->force_dpms_off = cur->force_dpms_off;
	/* TODO: duplicate dc_stream once the stream object is flattened */
6615 
6616 	return &state->base;
6617 }
6618 
6619 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6620 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6621 {
6622 	crtc_debugfs_init(crtc);
6623 
6624 	return 0;
6625 }
6626 #endif
6627 
6628 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6629 {
6630 	enum dc_irq_source irq_source;
6631 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6632 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6633 	int rc;
6634 
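	/* VUPDATE interrupt sources are indexed by the CRTC's OTG instance. */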
6635 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6636 
6637 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6638 
6639 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6640 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6641 	return rc;
6642 }
6643 
6644 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6645 {
6646 	enum dc_irq_source irq_source;
6647 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6648 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6649 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6650 #if defined(CONFIG_DRM_AMD_DC_DCN)
6651 	struct amdgpu_display_manager *dm = &adev->dm;
6652 	struct vblank_control_work *work;
6653 #endif
6654 	int rc = 0;
6655 
6656 	if (enable) {
6657 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6658 		if (amdgpu_dm_vrr_active(acrtc_state))
6659 			rc = dm_set_vupdate_irq(crtc, true);
6660 	} else {
6661 		/* vblank irq off -> vupdate irq off */
6662 		rc = dm_set_vupdate_irq(crtc, false);
6663 	}
6664 
6665 	if (rc)
6666 		return rc;
6667 
6668 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6669 
6670 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6671 		return -EBUSY;
6672 
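	/* Skip queuing the deferred vblank control work while a GPU reset is in progress. */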
6673 	if (amdgpu_in_reset(adev))
6674 		return 0;
6675 
6676 #if defined(CONFIG_DRM_AMD_DC_DCN)
6677 	if (dm->vblank_control_workqueue) {
6678 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6679 		if (!work)
6680 			return -ENOMEM;
6681 
6682 		INIT_WORK(&work->work, vblank_control_worker);
6683 		work->dm = dm;
6684 		work->acrtc = acrtc;
6685 		work->enable = enable;
6686 
6687 		if (acrtc_state->stream) {
6688 			dc_stream_retain(acrtc_state->stream);
6689 			work->stream = acrtc_state->stream;
6690 		}
6691 
6692 		queue_work(dm->vblank_control_workqueue, &work->work);
6693 	}
6694 #endif
6695 
6696 	return 0;
6697 }
6698 
6699 static int dm_enable_vblank(struct drm_crtc *crtc)
6700 {
6701 	return dm_set_vblank(crtc, true);
6702 }
6703 
6704 static void dm_disable_vblank(struct drm_crtc *crtc)
6705 {
6706 	dm_set_vblank(crtc, false);
6707 }
6708 
/* Implement only the options currently available for the driver */
6710 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6711 	.reset = dm_crtc_reset_state,
6712 	.destroy = amdgpu_dm_crtc_destroy,
6713 	.set_config = drm_atomic_helper_set_config,
6714 	.page_flip = drm_atomic_helper_page_flip,
6715 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6716 	.atomic_destroy_state = dm_crtc_destroy_state,
6717 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6718 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6719 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6720 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6721 	.enable_vblank = dm_enable_vblank,
6722 	.disable_vblank = dm_disable_vblank,
6723 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6724 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6725 	.late_register = amdgpu_dm_crtc_late_register,
6726 #endif
6727 };
6728 
6729 static enum drm_connector_status
6730 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6731 {
6732 	bool connected;
6733 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6734 
6735 	/*
6736 	 * Notes:
6737 	 * 1. This interface is NOT called in context of HPD irq.
6738 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6739 	 * makes it a bad place for *any* MST-related activity.
6740 	 */
6741 
6742 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6743 	    !aconnector->fake_enable)
6744 		connected = (aconnector->dc_sink != NULL);
6745 	else
6746 		connected = (aconnector->base.force == DRM_FORCE_ON);
6747 
6748 	update_subconnector_property(aconnector);
6749 
6750 	return (connected ? connector_status_connected :
6751 			connector_status_disconnected);
6752 }
6753 
6754 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6755 					    struct drm_connector_state *connector_state,
6756 					    struct drm_property *property,
6757 					    uint64_t val)
6758 {
6759 	struct drm_device *dev = connector->dev;
6760 	struct amdgpu_device *adev = drm_to_adev(dev);
6761 	struct dm_connector_state *dm_old_state =
6762 		to_dm_connector_state(connector->state);
6763 	struct dm_connector_state *dm_new_state =
6764 		to_dm_connector_state(connector_state);
6765 
6766 	int ret = -EINVAL;
6767 
6768 	if (property == dev->mode_config.scaling_mode_property) {
6769 		enum amdgpu_rmx_type rmx_type;
6770 
6771 		switch (val) {
6772 		case DRM_MODE_SCALE_CENTER:
6773 			rmx_type = RMX_CENTER;
6774 			break;
6775 		case DRM_MODE_SCALE_ASPECT:
6776 			rmx_type = RMX_ASPECT;
6777 			break;
6778 		case DRM_MODE_SCALE_FULLSCREEN:
6779 			rmx_type = RMX_FULL;
6780 			break;
6781 		case DRM_MODE_SCALE_NONE:
6782 		default:
6783 			rmx_type = RMX_OFF;
6784 			break;
6785 		}
6786 
6787 		if (dm_old_state->scaling == rmx_type)
6788 			return 0;
6789 
6790 		dm_new_state->scaling = rmx_type;
6791 		ret = 0;
6792 	} else if (property == adev->mode_info.underscan_hborder_property) {
6793 		dm_new_state->underscan_hborder = val;
6794 		ret = 0;
6795 	} else if (property == adev->mode_info.underscan_vborder_property) {
6796 		dm_new_state->underscan_vborder = val;
6797 		ret = 0;
6798 	} else if (property == adev->mode_info.underscan_property) {
6799 		dm_new_state->underscan_enable = val;
6800 		ret = 0;
6801 	} else if (property == adev->mode_info.abm_level_property) {
6802 		dm_new_state->abm_level = val;
6803 		ret = 0;
6804 	}
6805 
6806 	return ret;
6807 }
6808 
6809 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6810 					    const struct drm_connector_state *state,
6811 					    struct drm_property *property,
6812 					    uint64_t *val)
6813 {
6814 	struct drm_device *dev = connector->dev;
6815 	struct amdgpu_device *adev = drm_to_adev(dev);
6816 	struct dm_connector_state *dm_state =
6817 		to_dm_connector_state(state);
6818 	int ret = -EINVAL;
6819 
6820 	if (property == dev->mode_config.scaling_mode_property) {
6821 		switch (dm_state->scaling) {
6822 		case RMX_CENTER:
6823 			*val = DRM_MODE_SCALE_CENTER;
6824 			break;
6825 		case RMX_ASPECT:
6826 			*val = DRM_MODE_SCALE_ASPECT;
6827 			break;
6828 		case RMX_FULL:
6829 			*val = DRM_MODE_SCALE_FULLSCREEN;
6830 			break;
6831 		case RMX_OFF:
6832 		default:
6833 			*val = DRM_MODE_SCALE_NONE;
6834 			break;
6835 		}
6836 		ret = 0;
6837 	} else if (property == adev->mode_info.underscan_hborder_property) {
6838 		*val = dm_state->underscan_hborder;
6839 		ret = 0;
6840 	} else if (property == adev->mode_info.underscan_vborder_property) {
6841 		*val = dm_state->underscan_vborder;
6842 		ret = 0;
6843 	} else if (property == adev->mode_info.underscan_property) {
6844 		*val = dm_state->underscan_enable;
6845 		ret = 0;
6846 	} else if (property == adev->mode_info.abm_level_property) {
6847 		*val = dm_state->abm_level;
6848 		ret = 0;
6849 	}
6850 
6851 	return ret;
6852 }
6853 
6854 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6855 {
6856 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6857 
6858 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6859 }
6860 
6861 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6862 {
6863 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6864 	const struct dc_link *link = aconnector->dc_link;
6865 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6866 	struct amdgpu_display_manager *dm = &adev->dm;
6867 	int i;
6868 
6869 	/*
6870 	 * Call only if mst_mgr was iniitalized before since it's not done
6871 	 * for all connector types.
6872 	 */
6873 	if (aconnector->mst_mgr.dev)
6874 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6875 
6876 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6877 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6878 	for (i = 0; i < dm->num_of_edps; i++) {
6879 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6880 			backlight_device_unregister(dm->backlight_dev[i]);
6881 			dm->backlight_dev[i] = NULL;
6882 		}
6883 	}
6884 #endif
6885 
6886 	if (aconnector->dc_em_sink)
6887 		dc_sink_release(aconnector->dc_em_sink);
6888 	aconnector->dc_em_sink = NULL;
6889 	if (aconnector->dc_sink)
6890 		dc_sink_release(aconnector->dc_sink);
6891 	aconnector->dc_sink = NULL;
6892 
6893 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6894 	drm_connector_unregister(connector);
6895 	drm_connector_cleanup(connector);
6896 	if (aconnector->i2c) {
6897 		i2c_del_adapter(&aconnector->i2c->base);
6898 		kfree(aconnector->i2c);
6899 	}
6900 	kfree(aconnector->dm_dp_aux.aux.name);
6901 
6902 	kfree(connector);
6903 }
6904 
6905 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6906 {
6907 	struct dm_connector_state *state =
6908 		to_dm_connector_state(connector->state);
6909 
6910 	if (connector->state)
6911 		__drm_atomic_helper_connector_destroy_state(connector->state);
6912 
6913 	kfree(state);
6914 
6915 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6916 
6917 	if (state) {
6918 		state->scaling = RMX_OFF;
6919 		state->underscan_enable = false;
6920 		state->underscan_hborder = 0;
6921 		state->underscan_vborder = 0;
6922 		state->base.max_requested_bpc = 8;
6923 		state->vcpi_slots = 0;
6924 		state->pbn = 0;
6925 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6926 			state->abm_level = amdgpu_dm_abm_level;
6927 
6928 		__drm_atomic_helper_connector_reset(connector, &state->base);
6929 	}
6930 }
6931 
6932 struct drm_connector_state *
6933 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6934 {
6935 	struct dm_connector_state *state =
6936 		to_dm_connector_state(connector->state);
6937 
6938 	struct dm_connector_state *new_state =
6939 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6940 
6941 	if (!new_state)
6942 		return NULL;
6943 
6944 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6945 
6946 	new_state->freesync_capable = state->freesync_capable;
6947 	new_state->abm_level = state->abm_level;
6948 	new_state->scaling = state->scaling;
6949 	new_state->underscan_enable = state->underscan_enable;
6950 	new_state->underscan_hborder = state->underscan_hborder;
6951 	new_state->underscan_vborder = state->underscan_vborder;
6952 	new_state->vcpi_slots = state->vcpi_slots;
6953 	new_state->pbn = state->pbn;
6954 	return &new_state->base;
6955 }
6956 
6957 static int
6958 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6959 {
6960 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6961 		to_amdgpu_dm_connector(connector);
6962 	int r;
6963 
6964 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6965 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6966 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6967 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6968 		if (r)
6969 			return r;
6970 	}
6971 
6972 #if defined(CONFIG_DEBUG_FS)
6973 	connector_debugfs_init(amdgpu_dm_connector);
6974 #endif
6975 
6976 	return 0;
6977 }
6978 
6979 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6980 	.reset = amdgpu_dm_connector_funcs_reset,
6981 	.detect = amdgpu_dm_connector_detect,
6982 	.fill_modes = drm_helper_probe_single_connector_modes,
6983 	.destroy = amdgpu_dm_connector_destroy,
6984 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6985 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6986 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6987 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6988 	.late_register = amdgpu_dm_connector_late_register,
6989 	.early_unregister = amdgpu_dm_connector_unregister
6990 };
6991 
6992 static int get_modes(struct drm_connector *connector)
6993 {
6994 	return amdgpu_dm_connector_get_modes(connector);
6995 }
6996 
6997 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6998 {
6999 	struct dc_sink_init_data init_params = {
7000 			.link = aconnector->dc_link,
7001 			.sink_signal = SIGNAL_TYPE_VIRTUAL
7002 	};
7003 	struct edid *edid;
7004 
7005 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
				aconnector->base.name);
7008 
7009 		aconnector->base.force = DRM_FORCE_OFF;
7010 		aconnector->base.override_edid = false;
7011 		return;
7012 	}
7013 
7014 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7015 
7016 	aconnector->edid = edid;
7017 
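	/*
	 * Create an emulated (remote) sink on the link, backed by the override
	 * EDID, so modes can still be exposed without a physically detected sink.
	 */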
7018 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7019 		aconnector->dc_link,
7020 		(uint8_t *)edid,
7021 		(edid->extensions + 1) * EDID_LENGTH,
7022 		&init_params);
7023 
7024 	if (aconnector->base.force == DRM_FORCE_ON) {
7025 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7026 		aconnector->dc_link->local_sink :
7027 		aconnector->dc_em_sink;
7028 		dc_sink_retain(aconnector->dc_sink);
7029 	}
7030 }
7031 
7032 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7033 {
7034 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7035 
7036 	/*
7037 	 * In case of headless boot with force on for DP managed connector
7038 	 * Those settings have to be != 0 to get initial modeset
7039 	 */
7040 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7041 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7042 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7043 	}
7044 
7045 
7046 	aconnector->base.override_edid = true;
7047 	create_eml_sink(aconnector);
7048 }
7049 
7050 struct dc_stream_state *
7051 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7052 				const struct drm_display_mode *drm_mode,
7053 				const struct dm_connector_state *dm_state,
7054 				const struct dc_stream_state *old_stream)
7055 {
7056 	struct drm_connector *connector = &aconnector->base;
7057 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7058 	struct dc_stream_state *stream;
7059 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7060 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7061 	enum dc_status dc_result = DC_OK;
7062 
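	/* Retry with progressively lower bpc (in steps of 2, down to 6 bpc) until DC validation succeeds. */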
7063 	do {
7064 		stream = create_stream_for_sink(aconnector, drm_mode,
7065 						dm_state, old_stream,
7066 						requested_bpc);
7067 		if (stream == NULL) {
7068 			DRM_ERROR("Failed to create stream for sink!\n");
7069 			break;
7070 		}
7071 
7072 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7073 
7074 		if (dc_result != DC_OK) {
7075 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7076 				      drm_mode->hdisplay,
7077 				      drm_mode->vdisplay,
7078 				      drm_mode->clock,
7079 				      dc_result,
7080 				      dc_status_to_str(dc_result));
7081 
7082 			dc_stream_release(stream);
7083 			stream = NULL;
7084 			requested_bpc -= 2; /* lower bpc to retry validation */
7085 		}
7086 
7087 	} while (stream == NULL && requested_bpc >= 6);
7088 
7089 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7090 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7091 
7092 		aconnector->force_yuv420_output = true;
7093 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7094 						dm_state, old_stream);
7095 		aconnector->force_yuv420_output = false;
7096 	}
7097 
7098 	return stream;
7099 }
7100 
7101 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7102 				   struct drm_display_mode *mode)
7103 {
7104 	int result = MODE_ERROR;
7105 	struct dc_sink *dc_sink;
7106 	/* TODO: Unhardcode stream count */
7107 	struct dc_stream_state *stream;
7108 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7109 
7110 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7111 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7112 		return result;
7113 
7114 	/*
7115 	 * Only run this the first time mode_valid is called to initilialize
7116 	 * EDID mgmt
7117 	 */
7118 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7119 		!aconnector->dc_em_sink)
7120 		handle_edid_mgmt(aconnector);
7121 
7122 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7123 
7124 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7125 				aconnector->base.force != DRM_FORCE_ON) {
7126 		DRM_ERROR("dc_sink is NULL!\n");
7127 		goto fail;
7128 	}
7129 
7130 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7131 	if (stream) {
7132 		dc_stream_release(stream);
7133 		result = MODE_OK;
7134 	}
7135 
7136 fail:
	/* TODO: error handling */
7138 	return result;
7139 }
7140 
7141 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7142 				struct dc_info_packet *out)
7143 {
7144 	struct hdmi_drm_infoframe frame;
7145 	unsigned char buf[30]; /* 26 + 4 */
7146 	ssize_t len;
7147 	int ret, i;
7148 
7149 	memset(out, 0, sizeof(*out));
7150 
7151 	if (!state->hdr_output_metadata)
7152 		return 0;
7153 
7154 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7155 	if (ret)
7156 		return ret;
7157 
7158 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7159 	if (len < 0)
7160 		return (int)len;
7161 
7162 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7163 	if (len != 30)
7164 		return -EINVAL;
7165 
7166 	/* Prepare the infopacket for DC. */
7167 	switch (state->connector->connector_type) {
7168 	case DRM_MODE_CONNECTOR_HDMIA:
7169 		out->hb0 = 0x87; /* type */
7170 		out->hb1 = 0x01; /* version */
7171 		out->hb2 = 0x1A; /* length */
7172 		out->sb[0] = buf[3]; /* checksum */
7173 		i = 1;
7174 		break;
7175 
7176 	case DRM_MODE_CONNECTOR_DisplayPort:
7177 	case DRM_MODE_CONNECTOR_eDP:
7178 		out->hb0 = 0x00; /* sdp id, zero */
7179 		out->hb1 = 0x87; /* type */
7180 		out->hb2 = 0x1D; /* payload len - 1 */
7181 		out->hb3 = (0x13 << 2); /* sdp version */
7182 		out->sb[0] = 0x01; /* version */
7183 		out->sb[1] = 0x1A; /* length */
7184 		i = 2;
7185 		break;
7186 
7187 	default:
7188 		return -EINVAL;
7189 	}
7190 
7191 	memcpy(&out->sb[i], &buf[4], 26);
7192 	out->valid = true;
7193 
7194 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7195 		       sizeof(out->sb), false);
7196 
7197 	return 0;
7198 }
7199 
7200 static int
7201 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7202 				 struct drm_atomic_state *state)
7203 {
7204 	struct drm_connector_state *new_con_state =
7205 		drm_atomic_get_new_connector_state(state, conn);
7206 	struct drm_connector_state *old_con_state =
7207 		drm_atomic_get_old_connector_state(state, conn);
7208 	struct drm_crtc *crtc = new_con_state->crtc;
7209 	struct drm_crtc_state *new_crtc_state;
7210 	int ret;
7211 
7212 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7213 
7214 	if (!crtc)
7215 		return 0;
7216 
7217 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7218 		struct dc_info_packet hdr_infopacket;
7219 
7220 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7221 		if (ret)
7222 			return ret;
7223 
7224 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7225 		if (IS_ERR(new_crtc_state))
7226 			return PTR_ERR(new_crtc_state);
7227 
7228 		/*
7229 		 * DC considers the stream backends changed if the
7230 		 * static metadata changes. Forcing the modeset also
7231 		 * gives a simple way for userspace to switch from
7232 		 * 8bpc to 10bpc when setting the metadata to enter
7233 		 * or exit HDR.
7234 		 *
7235 		 * Changing the static metadata after it's been
7236 		 * set is permissible, however. So only force a
7237 		 * modeset if we're entering or exiting HDR.
7238 		 */
7239 		new_crtc_state->mode_changed =
7240 			!old_con_state->hdr_output_metadata ||
7241 			!new_con_state->hdr_output_metadata;
7242 	}
7243 
7244 	return 0;
7245 }
7246 
7247 static const struct drm_connector_helper_funcs
7248 amdgpu_dm_connector_helper_funcs = {
7249 	/*
7250 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7251 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7252 	 * are missing after user start lightdm. So we need to renew modes list.
7253 	 * in get_modes call back, not just return the modes count
7254 	 */
7255 	.get_modes = get_modes,
7256 	.mode_valid = amdgpu_dm_connector_mode_valid,
7257 	.atomic_check = amdgpu_dm_connector_atomic_check,
7258 };
7259 
7260 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7261 {
7262 }
7263 
7264 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7265 {
7266 	struct drm_atomic_state *state = new_crtc_state->state;
7267 	struct drm_plane *plane;
7268 	int num_active = 0;
7269 
7270 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7271 		struct drm_plane_state *new_plane_state;
7272 
7273 		/* Cursor planes are "fake". */
7274 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7275 			continue;
7276 
7277 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7278 
7279 		if (!new_plane_state) {
7280 			/*
			 * The plane is enabled on the CRTC and hasn't changed
7282 			 * state. This means that it previously passed
7283 			 * validation and is therefore enabled.
7284 			 */
7285 			num_active += 1;
7286 			continue;
7287 		}
7288 
7289 		/* We need a framebuffer to be considered enabled. */
7290 		num_active += (new_plane_state->fb != NULL);
7291 	}
7292 
7293 	return num_active;
7294 }
7295 
7296 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7297 					 struct drm_crtc_state *new_crtc_state)
7298 {
7299 	struct dm_crtc_state *dm_new_crtc_state =
7300 		to_dm_crtc_state(new_crtc_state);
7301 
7302 	dm_new_crtc_state->active_planes = 0;
7303 
7304 	if (!dm_new_crtc_state->stream)
7305 		return;
7306 
7307 	dm_new_crtc_state->active_planes =
7308 		count_crtc_active_planes(new_crtc_state);
7309 }
7310 
7311 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7312 				       struct drm_atomic_state *state)
7313 {
7314 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7315 									  crtc);
7316 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7317 	struct dc *dc = adev->dm.dc;
7318 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7319 	int ret = -EINVAL;
7320 
7321 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7322 
7323 	dm_update_crtc_active_planes(crtc, crtc_state);
7324 
7325 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7326 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7327 		return ret;
7328 	}
7329 
7330 	/*
7331 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7332 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7333 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7334 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7335 	 */
7336 	if (crtc_state->enable &&
7337 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7338 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7339 		return -EINVAL;
7340 	}
7341 
7342 	/* In some use cases, like reset, no stream is attached */
7343 	if (!dm_crtc_state->stream)
7344 		return 0;
7345 
7346 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7347 		return 0;
7348 
7349 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7350 	return ret;
7351 }
7352 
7353 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7354 				      const struct drm_display_mode *mode,
7355 				      struct drm_display_mode *adjusted_mode)
7356 {
7357 	return true;
7358 }
7359 
7360 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7361 	.disable = dm_crtc_helper_disable,
7362 	.atomic_check = dm_crtc_helper_atomic_check,
7363 	.mode_fixup = dm_crtc_helper_mode_fixup,
7364 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7365 };
7366 
7367 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7368 {
7369 
7370 }
7371 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7392 
7393 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7394 					  struct drm_crtc_state *crtc_state,
7395 					  struct drm_connector_state *conn_state)
7396 {
7397 	struct drm_atomic_state *state = crtc_state->state;
7398 	struct drm_connector *connector = conn_state->connector;
7399 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7400 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7401 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7402 	struct drm_dp_mst_topology_mgr *mst_mgr;
7403 	struct drm_dp_mst_port *mst_port;
7404 	enum dc_color_depth color_depth;
7405 	int clock, bpp = 0;
7406 	bool is_y420 = false;
7407 
7408 	if (!aconnector->port || !aconnector->dc_sink)
7409 		return 0;
7410 
7411 	mst_port = aconnector->port;
7412 	mst_mgr = &aconnector->mst_port->mst_mgr;
7413 
7414 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7415 		return 0;
7416 
7417 	if (!state->duplicated) {
7418 		int max_bpc = conn_state->max_requested_bpc;
7419 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7420 				aconnector->force_yuv420_output;
7421 		color_depth = convert_color_depth_from_display_info(connector,
7422 								    is_y420,
7423 								    max_bpc);
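		/* 3 components (e.g. RGB) times bits per component. */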
7424 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7425 		clock = adjusted_mode->clock;
7426 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7427 	}
7428 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7429 									   mst_mgr,
7430 									   mst_port,
7431 									   dm_new_connector_state->pbn,
7432 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7433 	if (dm_new_connector_state->vcpi_slots < 0) {
7434 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7435 		return dm_new_connector_state->vcpi_slots;
7436 	}
7437 	return 0;
7438 }
7439 
7440 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7441 	.disable = dm_encoder_helper_disable,
7442 	.atomic_check = dm_encoder_helper_atomic_check
7443 };
7444 
7445 #if defined(CONFIG_DRM_AMD_DC_DCN)
7446 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7447 					    struct dc_state *dc_state,
7448 					    struct dsc_mst_fairness_vars *vars)
7449 {
7450 	struct dc_stream_state *stream = NULL;
7451 	struct drm_connector *connector;
7452 	struct drm_connector_state *new_con_state;
7453 	struct amdgpu_dm_connector *aconnector;
7454 	struct dm_connector_state *dm_conn_state;
7455 	int i, j;
7456 	int vcpi, pbn_div, pbn, slot_num = 0;
7457 
7458 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7459 
7460 		aconnector = to_amdgpu_dm_connector(connector);
7461 
7462 		if (!aconnector->port)
7463 			continue;
7464 
7465 		if (!new_con_state || !new_con_state->crtc)
7466 			continue;
7467 
7468 		dm_conn_state = to_dm_connector_state(new_con_state);
7469 
7470 		for (j = 0; j < dc_state->stream_count; j++) {
7471 			stream = dc_state->streams[j];
7472 			if (!stream)
7473 				continue;
7474 
7475 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7476 				break;
7477 
7478 			stream = NULL;
7479 		}
7480 
7481 		if (!stream)
7482 			continue;
7483 
7484 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7486 		for (j = 0; j < dc_state->stream_count; j++) {
7487 			if (vars[j].aconnector == aconnector) {
7488 				pbn = vars[j].pbn;
7489 				break;
7490 			}
7491 		}
7492 
7493 		if (j == dc_state->stream_count)
7494 			continue;
7495 
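		/* Number of MST time slots needed to carry this stream's PBN. */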
7496 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7497 
7498 		if (stream->timing.flags.DSC != 1) {
7499 			dm_conn_state->pbn = pbn;
7500 			dm_conn_state->vcpi_slots = slot_num;
7501 
7502 			drm_dp_mst_atomic_enable_dsc(state,
7503 						     aconnector->port,
7504 						     dm_conn_state->pbn,
7505 						     0,
7506 						     false);
7507 			continue;
7508 		}
7509 
7510 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7511 						    aconnector->port,
7512 						    pbn, pbn_div,
7513 						    true);
7514 		if (vcpi < 0)
7515 			return vcpi;
7516 
7517 		dm_conn_state->pbn = pbn;
7518 		dm_conn_state->vcpi_slots = vcpi;
7519 	}
7520 	return 0;
7521 }
7522 #endif
7523 
7524 static void dm_drm_plane_reset(struct drm_plane *plane)
7525 {
7526 	struct dm_plane_state *amdgpu_state = NULL;
7527 
7528 	if (plane->state)
7529 		plane->funcs->atomic_destroy_state(plane, plane->state);
7530 
7531 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7532 	WARN_ON(amdgpu_state == NULL);
7533 
7534 	if (amdgpu_state)
7535 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7536 }
7537 
7538 static struct drm_plane_state *
7539 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7540 {
7541 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7542 
7543 	old_dm_plane_state = to_dm_plane_state(plane->state);
7544 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7545 	if (!dm_plane_state)
7546 		return NULL;
7547 
7548 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7549 
7550 	if (old_dm_plane_state->dc_state) {
7551 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7552 		dc_plane_state_retain(dm_plane_state->dc_state);
7553 	}
7554 
7555 	return &dm_plane_state->base;
7556 }
7557 
7558 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7559 				struct drm_plane_state *state)
7560 {
7561 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7562 
7563 	if (dm_plane_state->dc_state)
7564 		dc_plane_state_release(dm_plane_state->dc_state);
7565 
7566 	drm_atomic_helper_plane_destroy_state(plane, state);
7567 }
7568 
7569 static const struct drm_plane_funcs dm_plane_funcs = {
7570 	.update_plane	= drm_atomic_helper_update_plane,
7571 	.disable_plane	= drm_atomic_helper_disable_plane,
7572 	.destroy	= drm_primary_helper_destroy,
7573 	.reset = dm_drm_plane_reset,
7574 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7575 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7576 	.format_mod_supported = dm_plane_format_mod_supported,
7577 };
7578 
7579 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7580 				      struct drm_plane_state *new_state)
7581 {
7582 	struct amdgpu_framebuffer *afb;
7583 	struct drm_gem_object *obj;
7584 	struct amdgpu_device *adev;
7585 	struct amdgpu_bo *rbo;
7586 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7587 	uint32_t domain;
7588 	int r;
7589 
7590 	if (!new_state->fb) {
7591 		DRM_DEBUG_KMS("No FB bound\n");
7592 		return 0;
7593 	}
7594 
7595 	afb = to_amdgpu_framebuffer(new_state->fb);
7596 	obj = new_state->fb->obj[0];
7597 	rbo = gem_to_amdgpu_bo(obj);
7598 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7599 
7600 	r = amdgpu_bo_reserve(rbo, true);
7601 	if (r) {
		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
7603 		return r;
7604 	}
7605 
7606 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7607 	if (r) {
7608 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7609 		goto error_unlock;
7610 	}
7611 
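	/* The cursor plane is always pinned in VRAM; other planes use the domains the display supports. */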
7612 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7613 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7614 	else
7615 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7616 
7617 	r = amdgpu_bo_pin(rbo, domain);
7618 	if (unlikely(r != 0)) {
7619 		if (r != -ERESTARTSYS)
7620 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7621 		goto error_unlock;
7622 	}
7623 
7624 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7625 	if (unlikely(r != 0)) {
7626 		DRM_ERROR("%p bind failed\n", rbo);
7627 		goto error_unpin;
7628 	}
7629 
7630 	amdgpu_bo_unreserve(rbo);
7631 
7632 	afb->address = amdgpu_bo_gpu_offset(rbo);
7633 
7634 	amdgpu_bo_ref(rbo);
7635 
7636 	/**
7637 	 * We don't do surface updates on planes that have been newly created,
7638 	 * but we also don't have the afb->address during atomic check.
7639 	 *
7640 	 * Fill in buffer attributes depending on the address here, but only on
7641 	 * newly created planes since they're not being used by DC yet and this
7642 	 * won't modify global state.
7643 	 */
7644 	dm_plane_state_old = to_dm_plane_state(plane->state);
7645 	dm_plane_state_new = to_dm_plane_state(new_state);
7646 
7647 	if (dm_plane_state_new->dc_state &&
7648 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7649 		struct dc_plane_state *plane_state =
7650 			dm_plane_state_new->dc_state;
7651 		bool force_disable_dcc = !plane_state->dcc.enable;
7652 
7653 		fill_plane_buffer_attributes(
7654 			adev, afb, plane_state->format, plane_state->rotation,
7655 			afb->tiling_flags,
7656 			&plane_state->tiling_info, &plane_state->plane_size,
7657 			&plane_state->dcc, &plane_state->address,
7658 			afb->tmz_surface, force_disable_dcc);
7659 	}
7660 
7661 	return 0;
7662 
7663 error_unpin:
7664 	amdgpu_bo_unpin(rbo);
7665 
7666 error_unlock:
7667 	amdgpu_bo_unreserve(rbo);
7668 	return r;
7669 }
7670 
7671 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7672 				       struct drm_plane_state *old_state)
7673 {
7674 	struct amdgpu_bo *rbo;
7675 	int r;
7676 
7677 	if (!old_state->fb)
7678 		return;
7679 
7680 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7681 	r = amdgpu_bo_reserve(rbo, false);
7682 	if (unlikely(r)) {
7683 		DRM_ERROR("failed to reserve rbo before unpin\n");
7684 		return;
7685 	}
7686 
7687 	amdgpu_bo_unpin(rbo);
7688 	amdgpu_bo_unreserve(rbo);
7689 	amdgpu_bo_unref(&rbo);
7690 }
7691 
7692 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7693 				       struct drm_crtc_state *new_crtc_state)
7694 {
7695 	struct drm_framebuffer *fb = state->fb;
7696 	int min_downscale, max_upscale;
7697 	int min_scale = 0;
7698 	int max_scale = INT_MAX;
7699 
7700 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7701 	if (fb && state->crtc) {
7702 		/* Validate viewport to cover the case when only the position changes */
7703 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7704 			int viewport_width = state->crtc_w;
7705 			int viewport_height = state->crtc_h;
7706 
7707 			if (state->crtc_x < 0)
7708 				viewport_width += state->crtc_x;
7709 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7710 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7711 
7712 			if (state->crtc_y < 0)
7713 				viewport_height += state->crtc_y;
7714 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7715 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7716 
7717 			if (viewport_width < 0 || viewport_height < 0) {
7718 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7719 				return -EINVAL;
7720 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7721 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7722 				return -EINVAL;
7723 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7724 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7725 				return -EINVAL;
7726 			}
7727 
7728 		}
7729 
7730 		/* Get min/max allowed scaling factors from plane caps. */
7731 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7732 					     &min_downscale, &max_upscale);
7733 		/*
7734 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7735 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7736 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7737 		 */
7738 		min_scale = (1000 << 16) / max_upscale;
7739 		max_scale = (1000 << 16) / min_downscale;
7740 	}
7741 
7742 	return drm_atomic_helper_check_plane_state(
7743 		state, new_crtc_state, min_scale, max_scale, true, true);
7744 }
7745 
7746 static int dm_plane_atomic_check(struct drm_plane *plane,
7747 				 struct drm_atomic_state *state)
7748 {
7749 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7750 										 plane);
7751 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7752 	struct dc *dc = adev->dm.dc;
7753 	struct dm_plane_state *dm_plane_state;
7754 	struct dc_scaling_info scaling_info;
7755 	struct drm_crtc_state *new_crtc_state;
7756 	int ret;
7757 
7758 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7759 
7760 	dm_plane_state = to_dm_plane_state(new_plane_state);
7761 
7762 	if (!dm_plane_state->dc_state)
7763 		return 0;
7764 
7765 	new_crtc_state =
7766 		drm_atomic_get_new_crtc_state(state,
7767 					      new_plane_state->crtc);
7768 	if (!new_crtc_state)
7769 		return -EINVAL;
7770 
7771 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7772 	if (ret)
7773 		return ret;
7774 
7775 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7776 	if (ret)
7777 		return ret;
7778 
7779 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7780 		return 0;
7781 
7782 	return -EINVAL;
7783 }
7784 
7785 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7786 				       struct drm_atomic_state *state)
7787 {
7788 	/* Only support async updates on cursor planes. */
7789 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7790 		return -EINVAL;
7791 
7792 	return 0;
7793 }
7794 
7795 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7796 					 struct drm_atomic_state *state)
7797 {
7798 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7799 									   plane);
7800 	struct drm_plane_state *old_state =
7801 		drm_atomic_get_old_plane_state(state, plane);
7802 
7803 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7804 
7805 	swap(plane->state->fb, new_state->fb);
7806 
7807 	plane->state->src_x = new_state->src_x;
7808 	plane->state->src_y = new_state->src_y;
7809 	plane->state->src_w = new_state->src_w;
7810 	plane->state->src_h = new_state->src_h;
7811 	plane->state->crtc_x = new_state->crtc_x;
7812 	plane->state->crtc_y = new_state->crtc_y;
7813 	plane->state->crtc_w = new_state->crtc_w;
7814 	plane->state->crtc_h = new_state->crtc_h;
7815 
7816 	handle_cursor_update(plane, old_state);
7817 }
7818 
7819 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7820 	.prepare_fb = dm_plane_helper_prepare_fb,
7821 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7822 	.atomic_check = dm_plane_atomic_check,
7823 	.atomic_async_check = dm_plane_atomic_async_check,
7824 	.atomic_async_update = dm_plane_atomic_async_update
7825 };
7826 
7827 /*
7828  * TODO: these are currently initialized to rgb formats only.
7829  * For future use cases we should either initialize them dynamically based on
7830  * plane capabilities, or initialize this array to all formats, so internal drm
7831  * check will succeed, and let DC implement proper check
7832  */
7833 static const uint32_t rgb_formats[] = {
7834 	DRM_FORMAT_XRGB8888,
7835 	DRM_FORMAT_ARGB8888,
7836 	DRM_FORMAT_RGBA8888,
7837 	DRM_FORMAT_XRGB2101010,
7838 	DRM_FORMAT_XBGR2101010,
7839 	DRM_FORMAT_ARGB2101010,
7840 	DRM_FORMAT_ABGR2101010,
7841 	DRM_FORMAT_XRGB16161616,
7842 	DRM_FORMAT_XBGR16161616,
7843 	DRM_FORMAT_ARGB16161616,
7844 	DRM_FORMAT_ABGR16161616,
7845 	DRM_FORMAT_XBGR8888,
7846 	DRM_FORMAT_ABGR8888,
7847 	DRM_FORMAT_RGB565,
7848 };
7849 
7850 static const uint32_t overlay_formats[] = {
7851 	DRM_FORMAT_XRGB8888,
7852 	DRM_FORMAT_ARGB8888,
7853 	DRM_FORMAT_RGBA8888,
7854 	DRM_FORMAT_XBGR8888,
7855 	DRM_FORMAT_ABGR8888,
7856 	DRM_FORMAT_RGB565
7857 };
7858 
7859 static const u32 cursor_formats[] = {
7860 	DRM_FORMAT_ARGB8888
7861 };
7862 
7863 static int get_plane_formats(const struct drm_plane *plane,
7864 			     const struct dc_plane_cap *plane_cap,
7865 			     uint32_t *formats, int max_formats)
7866 {
7867 	int i, num_formats = 0;
7868 
7869 	/*
7870 	 * TODO: Query support for each group of formats directly from
7871 	 * DC plane caps. This will require adding more formats to the
7872 	 * caps list.
7873 	 */
7874 
7875 	switch (plane->type) {
7876 	case DRM_PLANE_TYPE_PRIMARY:
7877 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7878 			if (num_formats >= max_formats)
7879 				break;
7880 
7881 			formats[num_formats++] = rgb_formats[i];
7882 		}
7883 
7884 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7885 			formats[num_formats++] = DRM_FORMAT_NV12;
7886 		if (plane_cap && plane_cap->pixel_format_support.p010)
7887 			formats[num_formats++] = DRM_FORMAT_P010;
7888 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7889 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7890 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7891 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7892 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7893 		}
7894 		break;
7895 
7896 	case DRM_PLANE_TYPE_OVERLAY:
7897 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7898 			if (num_formats >= max_formats)
7899 				break;
7900 
7901 			formats[num_formats++] = overlay_formats[i];
7902 		}
7903 		break;
7904 
7905 	case DRM_PLANE_TYPE_CURSOR:
7906 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7907 			if (num_formats >= max_formats)
7908 				break;
7909 
7910 			formats[num_formats++] = cursor_formats[i];
7911 		}
7912 		break;
7913 	}
7914 
7915 	return num_formats;
7916 }
7917 
7918 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7919 				struct drm_plane *plane,
7920 				unsigned long possible_crtcs,
7921 				const struct dc_plane_cap *plane_cap)
7922 {
7923 	uint32_t formats[32];
7924 	int num_formats;
7925 	int res = -EPERM;
7926 	unsigned int supported_rotations;
7927 	uint64_t *modifiers = NULL;
7928 
7929 	num_formats = get_plane_formats(plane, plane_cap, formats,
7930 					ARRAY_SIZE(formats));
7931 
7932 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7933 	if (res)
7934 		return res;
7935 
7936 	if (modifiers == NULL)
7937 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7938 
7939 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7940 				       &dm_plane_funcs, formats, num_formats,
7941 				       modifiers, plane->type, NULL);
7942 	kfree(modifiers);
7943 	if (res)
7944 		return res;
7945 
7946 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7947 	    plane_cap && plane_cap->per_pixel_alpha) {
7948 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7949 					  BIT(DRM_MODE_BLEND_PREMULTI);
7950 
7951 		drm_plane_create_alpha_property(plane);
7952 		drm_plane_create_blend_mode_property(plane, blend_caps);
7953 	}
7954 
7955 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7956 	    plane_cap &&
7957 	    (plane_cap->pixel_format_support.nv12 ||
7958 	     plane_cap->pixel_format_support.p010)) {
7959 		/* This only affects YUV formats. */
7960 		drm_plane_create_color_properties(
7961 			plane,
7962 			BIT(DRM_COLOR_YCBCR_BT601) |
7963 			BIT(DRM_COLOR_YCBCR_BT709) |
7964 			BIT(DRM_COLOR_YCBCR_BT2020),
7965 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7966 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7967 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7968 	}
7969 
7970 	supported_rotations =
7971 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7972 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7973 
7974 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7975 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7976 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7977 						   supported_rotations);
7978 
7979 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7980 
7981 	/* Create (reset) the plane state */
7982 	if (plane->funcs->reset)
7983 		plane->funcs->reset(plane);
7984 
7985 	return 0;
7986 }
7987 
7988 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7989 			       struct drm_plane *plane,
7990 			       uint32_t crtc_index)
7991 {
7992 	struct amdgpu_crtc *acrtc = NULL;
7993 	struct drm_plane *cursor_plane;
7994 
7995 	int res = -ENOMEM;
7996 
7997 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7998 	if (!cursor_plane)
7999 		goto fail;
8000 
8001 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
8003 
8004 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8005 	if (!acrtc)
8006 		goto fail;
8007 
8008 	res = drm_crtc_init_with_planes(
8009 			dm->ddev,
8010 			&acrtc->base,
8011 			plane,
8012 			cursor_plane,
8013 			&amdgpu_dm_crtc_funcs, NULL);
8014 
8015 	if (res)
8016 		goto fail;
8017 
8018 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8019 
8020 	/* Create (reset) the plane state */
8021 	if (acrtc->base.funcs->reset)
8022 		acrtc->base.funcs->reset(&acrtc->base);
8023 
8024 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8025 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8026 
8027 	acrtc->crtc_id = crtc_index;
8028 	acrtc->base.enabled = false;
8029 	acrtc->otg_inst = -1;
8030 
8031 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8032 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8033 				   true, MAX_COLOR_LUT_ENTRIES);
8034 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8035 
8036 	return 0;
8037 
8038 fail:
8039 	kfree(acrtc);
8040 	kfree(cursor_plane);
8041 	return res;
8042 }
8043 
8044 
8045 static int to_drm_connector_type(enum signal_type st)
8046 {
8047 	switch (st) {
8048 	case SIGNAL_TYPE_HDMI_TYPE_A:
8049 		return DRM_MODE_CONNECTOR_HDMIA;
8050 	case SIGNAL_TYPE_EDP:
8051 		return DRM_MODE_CONNECTOR_eDP;
8052 	case SIGNAL_TYPE_LVDS:
8053 		return DRM_MODE_CONNECTOR_LVDS;
8054 	case SIGNAL_TYPE_RGB:
8055 		return DRM_MODE_CONNECTOR_VGA;
8056 	case SIGNAL_TYPE_DISPLAY_PORT:
8057 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8058 		return DRM_MODE_CONNECTOR_DisplayPort;
8059 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8060 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8061 		return DRM_MODE_CONNECTOR_DVID;
8062 	case SIGNAL_TYPE_VIRTUAL:
8063 		return DRM_MODE_CONNECTOR_VIRTUAL;
8064 
8065 	default:
8066 		return DRM_MODE_CONNECTOR_Unknown;
8067 	}
8068 }
8069 
8070 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8071 {
8072 	struct drm_encoder *encoder;
8073 
8074 	/* There is only one encoder per connector */
8075 	drm_connector_for_each_possible_encoder(connector, encoder)
8076 		return encoder;
8077 
8078 	return NULL;
8079 }
8080 
8081 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8082 {
8083 	struct drm_encoder *encoder;
8084 	struct amdgpu_encoder *amdgpu_encoder;
8085 
8086 	encoder = amdgpu_dm_connector_to_encoder(connector);
8087 
8088 	if (encoder == NULL)
8089 		return;
8090 
8091 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8092 
8093 	amdgpu_encoder->native_mode.clock = 0;
8094 
8095 	if (!list_empty(&connector->probed_modes)) {
8096 		struct drm_display_mode *preferred_mode = NULL;
8097 
8098 		list_for_each_entry(preferred_mode,
8099 				    &connector->probed_modes,
8100 				    head) {
8101 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8102 				amdgpu_encoder->native_mode = *preferred_mode;
8103 
8104 			break;
8105 		}
8106 
8107 	}
8108 }
8109 
8110 static struct drm_display_mode *
8111 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8112 			     char *name,
8113 			     int hdisplay, int vdisplay)
8114 {
8115 	struct drm_device *dev = encoder->dev;
8116 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8117 	struct drm_display_mode *mode = NULL;
8118 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8119 
8120 	mode = drm_mode_duplicate(dev, native_mode);
8121 
8122 	if (mode == NULL)
8123 		return NULL;
8124 
8125 	mode->hdisplay = hdisplay;
8126 	mode->vdisplay = vdisplay;
8127 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8128 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8129 
8130 	return mode;
8131 
8132 }
8133 
8134 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8135 						 struct drm_connector *connector)
8136 {
8137 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8138 	struct drm_display_mode *mode = NULL;
8139 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8140 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8141 				to_amdgpu_dm_connector(connector);
8142 	int i;
8143 	int n;
8144 	struct mode_size {
8145 		char name[DRM_DISPLAY_MODE_LEN];
8146 		int w;
8147 		int h;
8148 	} common_modes[] = {
8149 		{  "640x480",  640,  480},
8150 		{  "800x600",  800,  600},
8151 		{ "1024x768", 1024,  768},
8152 		{ "1280x720", 1280,  720},
8153 		{ "1280x800", 1280,  800},
8154 		{"1280x1024", 1280, 1024},
8155 		{ "1440x900", 1440,  900},
8156 		{"1680x1050", 1680, 1050},
8157 		{"1600x1200", 1600, 1200},
8158 		{"1920x1080", 1920, 1080},
8159 		{"1920x1200", 1920, 1200}
8160 	};
8161 
8162 	n = ARRAY_SIZE(common_modes);
8163 
8164 	for (i = 0; i < n; i++) {
8165 		struct drm_display_mode *curmode = NULL;
8166 		bool mode_existed = false;
8167 
8168 		if (common_modes[i].w > native_mode->hdisplay ||
8169 		    common_modes[i].h > native_mode->vdisplay ||
8170 		   (common_modes[i].w == native_mode->hdisplay &&
8171 		    common_modes[i].h == native_mode->vdisplay))
8172 			continue;
8173 
8174 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8175 			if (common_modes[i].w == curmode->hdisplay &&
8176 			    common_modes[i].h == curmode->vdisplay) {
8177 				mode_existed = true;
8178 				break;
8179 			}
8180 		}
8181 
8182 		if (mode_existed)
8183 			continue;
8184 
8185 		mode = amdgpu_dm_create_common_mode(encoder,
8186 				common_modes[i].name, common_modes[i].w,
8187 				common_modes[i].h);
8188 		if (!mode)
8189 			continue;
8190 
8191 		drm_mode_probed_add(connector, mode);
8192 		amdgpu_dm_connector->num_modes++;
8193 	}
8194 }
8195 
8196 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8197 {
8198 	struct drm_encoder *encoder;
8199 	struct amdgpu_encoder *amdgpu_encoder;
8200 	const struct drm_display_mode *native_mode;
8201 
8202 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8203 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8204 		return;
8205 
8206 	encoder = amdgpu_dm_connector_to_encoder(connector);
8207 	if (!encoder)
8208 		return;
8209 
8210 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8211 
8212 	native_mode = &amdgpu_encoder->native_mode;
8213 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8214 		return;
8215 
8216 	drm_connector_set_panel_orientation_with_quirk(connector,
8217 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8218 						       native_mode->hdisplay,
8219 						       native_mode->vdisplay);
8220 }
8221 
8222 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8223 					      struct edid *edid)
8224 {
8225 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8226 			to_amdgpu_dm_connector(connector);
8227 
8228 	if (edid) {
8229 		/* empty probed_modes */
8230 		INIT_LIST_HEAD(&connector->probed_modes);
8231 		amdgpu_dm_connector->num_modes =
8232 				drm_add_edid_modes(connector, edid);
8233 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since the EDID can have
		 * more than one preferred mode. Modes that appear later
		 * in the probed mode list could have a higher preferred
		 * resolution. For example, 3840x2160 in the base EDID
		 * preferred timing and 4096x2160 in a DID extension
		 * block later.
		 */
8242 		drm_mode_sort(&connector->probed_modes);
8243 		amdgpu_dm_get_native_mode(connector);
8244 
8245 		/* Freesync capabilities are reset by calling
8246 		 * drm_add_edid_modes() and need to be
8247 		 * restored here.
8248 		 */
8249 		amdgpu_dm_update_freesync_caps(connector, edid);
8250 
8251 		amdgpu_set_panel_orientation(connector);
8252 	} else {
8253 		amdgpu_dm_connector->num_modes = 0;
8254 	}
8255 }
8256 
8257 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8258 			      struct drm_display_mode *mode)
8259 {
8260 	struct drm_display_mode *m;
8261 
8262 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8263 		if (drm_mode_equal(m, mode))
8264 			return true;
8265 	}
8266 
8267 	return false;
8268 }
8269 
8270 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8271 {
8272 	const struct drm_display_mode *m;
8273 	struct drm_display_mode *new_mode;
8274 	uint i;
8275 	uint32_t new_modes_count = 0;
8276 
8277 	/* Standard FPS values
8278 	 *
8279 	 * 23.976       - TV/NTSC
8280 	 * 24 	        - Cinema
8281 	 * 25 	        - TV/PAL
8282 	 * 29.97        - TV/NTSC
8283 	 * 30 	        - TV/NTSC
8284 	 * 48 	        - Cinema HFR
8285 	 * 50 	        - TV/PAL
8286 	 * 60 	        - Commonly used
8287 	 * 48,72,96,120 - Multiples of 24
8288 	 */
8289 	static const uint32_t common_rates[] = {
8290 		23976, 24000, 25000, 29970, 30000,
8291 		48000, 50000, 60000, 72000, 96000, 120000
8292 	};
8293 
8294 	/*
8295 	 * Find mode with highest refresh rate with the same resolution
8296 	 * as the preferred mode. Some monitors report a preferred mode
8297 	 * with lower resolution than the highest refresh rate supported.
8298 	 */
8299 
8300 	m = get_highest_refresh_rate_mode(aconnector, true);
8301 	if (!m)
8302 		return 0;
8303 
8304 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8305 		uint64_t target_vtotal, target_vtotal_diff;
8306 		uint64_t num, den;
8307 
8308 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8309 			continue;
8310 
8311 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8312 		    common_rates[i] > aconnector->max_vfreq * 1000)
8313 			continue;
8314 
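		/*
		 * vtotal needed to hit the target rate at a fixed pixel clock
		 * and htotal: vtotal = clock[kHz] * 10^6 / (rate[mHz] * htotal).
		 */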
8315 		num = (unsigned long long)m->clock * 1000 * 1000;
8316 		den = common_rates[i] * (unsigned long long)m->htotal;
8317 		target_vtotal = div_u64(num, den);
8318 		target_vtotal_diff = target_vtotal - m->vtotal;
8319 
8320 		/* Check for illegal modes */
8321 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8322 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8323 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8324 			continue;
8325 
8326 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8327 		if (!new_mode)
8328 			goto out;
8329 
8330 		new_mode->vtotal += (u16)target_vtotal_diff;
8331 		new_mode->vsync_start += (u16)target_vtotal_diff;
8332 		new_mode->vsync_end += (u16)target_vtotal_diff;
8333 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8334 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8335 
8336 		if (!is_duplicate_mode(aconnector, new_mode)) {
8337 			drm_mode_probed_add(&aconnector->base, new_mode);
8338 			new_modes_count += 1;
8339 		} else
8340 			drm_mode_destroy(aconnector->base.dev, new_mode);
8341 	}
8342  out:
8343 	return new_modes_count;
8344 }
8345 
8346 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8347 						   struct edid *edid)
8348 {
8349 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8350 		to_amdgpu_dm_connector(connector);
8351 
8352 	if (!edid)
8353 		return;
8354 
8355 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8356 		amdgpu_dm_connector->num_modes +=
8357 			add_fs_modes(amdgpu_dm_connector);
8358 }
8359 
8360 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8361 {
8362 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8363 			to_amdgpu_dm_connector(connector);
8364 	struct drm_encoder *encoder;
8365 	struct edid *edid = amdgpu_dm_connector->edid;
8366 
8367 	encoder = amdgpu_dm_connector_to_encoder(connector);
8368 
8369 	if (!drm_edid_is_valid(edid)) {
8370 		amdgpu_dm_connector->num_modes =
8371 				drm_add_modes_noedid(connector, 640, 480);
8372 	} else {
8373 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8374 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8375 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8376 	}
8377 	amdgpu_dm_fbc_init(connector);
8378 
8379 	return amdgpu_dm_connector->num_modes;
8380 }
8381 
8382 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8383 				     struct amdgpu_dm_connector *aconnector,
8384 				     int connector_type,
8385 				     struct dc_link *link,
8386 				     int link_index)
8387 {
8388 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8389 
8390 	/*
8391 	 * Some of the properties below require access to state, like bpc.
8392 	 * Allocate some default initial connector state with our reset helper.
8393 	 */
8394 	if (aconnector->base.funcs->reset)
8395 		aconnector->base.funcs->reset(&aconnector->base);
8396 
8397 	aconnector->connector_id = link_index;
8398 	aconnector->dc_link = link;
8399 	aconnector->base.interlace_allowed = false;
8400 	aconnector->base.doublescan_allowed = false;
8401 	aconnector->base.stereo_allowed = false;
8402 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8403 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8404 	aconnector->audio_inst = -1;
8405 	mutex_init(&aconnector->hpd_lock);
8406 
	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
8411 	switch (connector_type) {
8412 	case DRM_MODE_CONNECTOR_HDMIA:
8413 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8414 		aconnector->base.ycbcr_420_allowed =
8415 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8416 		break;
8417 	case DRM_MODE_CONNECTOR_DisplayPort:
8418 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8419 		link->link_enc = link_enc_cfg_get_link_enc(link);
8420 		ASSERT(link->link_enc);
8421 		if (link->link_enc)
8422 			aconnector->base.ycbcr_420_allowed =
8423 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8424 		break;
8425 	case DRM_MODE_CONNECTOR_DVID:
8426 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8427 		break;
8428 	default:
8429 		break;
8430 	}
8431 
8432 	drm_object_attach_property(&aconnector->base.base,
8433 				dm->ddev->mode_config.scaling_mode_property,
8434 				DRM_MODE_SCALE_NONE);
8435 
8436 	drm_object_attach_property(&aconnector->base.base,
8437 				adev->mode_info.underscan_property,
8438 				UNDERSCAN_OFF);
8439 	drm_object_attach_property(&aconnector->base.base,
8440 				adev->mode_info.underscan_hborder_property,
8441 				0);
8442 	drm_object_attach_property(&aconnector->base.base,
8443 				adev->mode_info.underscan_vborder_property,
8444 				0);
8445 
8446 	if (!aconnector->mst_port)
8447 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8448 
	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
8450 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8451 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8452 
8453 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8454 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8455 		drm_object_attach_property(&aconnector->base.base,
8456 				adev->mode_info.abm_level_property, 0);
8457 	}
8458 
8459 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8460 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8461 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8462 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8463 
8464 		if (!aconnector->mst_port)
8465 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8466 
8467 #ifdef CONFIG_DRM_AMD_DC_HDCP
8468 		if (adev->dm.hdcp_workqueue)
8469 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8470 #endif
8471 	}
8472 }
8473 
8474 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8475 			      struct i2c_msg *msgs, int num)
8476 {
8477 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8478 	struct ddc_service *ddc_service = i2c->ddc_service;
8479 	struct i2c_command cmd;
8480 	int i;
8481 	int result = -EIO;
8482 
8483 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8484 
8485 	if (!cmd.payloads)
8486 		return result;
8487 
8488 	cmd.number_of_payloads = num;
8489 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8490 	cmd.speed = 100;
8491 
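	/* Translate each struct i2c_msg into a DC i2c payload. */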
8492 	for (i = 0; i < num; i++) {
8493 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8494 		cmd.payloads[i].address = msgs[i].addr;
8495 		cmd.payloads[i].length = msgs[i].len;
8496 		cmd.payloads[i].data = msgs[i].buf;
8497 	}
8498 
8499 	if (dc_submit_i2c(
8500 			ddc_service->ctx->dc,
8501 			ddc_service->ddc_pin->hw_info.ddc_channel,
8502 			&cmd))
8503 		result = num;
8504 
8505 	kfree(cmd.payloads);
8506 	return result;
8507 }
8508 
8509 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8510 {
8511 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8512 }
8513 
8514 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8515 	.master_xfer = amdgpu_dm_i2c_xfer,
8516 	.functionality = amdgpu_dm_i2c_func,
8517 };
8518 
8519 static struct amdgpu_i2c_adapter *
8520 create_i2c(struct ddc_service *ddc_service,
8521 	   int link_index,
8522 	   int *res)
8523 {
8524 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8525 	struct amdgpu_i2c_adapter *i2c;
8526 
8527 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8528 	if (!i2c)
8529 		return NULL;
8530 	i2c->base.owner = THIS_MODULE;
8531 	i2c->base.class = I2C_CLASS_DDC;
8532 	i2c->base.dev.parent = &adev->pdev->dev;
8533 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8534 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8535 	i2c_set_adapdata(&i2c->base, i2c);
8536 	i2c->ddc_service = ddc_service;
8537 	if (i2c->ddc_service->ddc_pin)
8538 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8539 
8540 	return i2c;
8541 }
8542 
8543 
8544 /*
8545  * Note: this function assumes that dc_link_detect() was called for the
8546  * dc_link which will be represented by this aconnector.
8547  */
8548 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8549 				    struct amdgpu_dm_connector *aconnector,
8550 				    uint32_t link_index,
8551 				    struct amdgpu_encoder *aencoder)
8552 {
8553 	int res = 0;
8554 	int connector_type;
8555 	struct dc *dc = dm->dc;
8556 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8557 	struct amdgpu_i2c_adapter *i2c;
8558 
8559 	link->priv = aconnector;
8560 
8561 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8562 
8563 	i2c = create_i2c(link->ddc, link->link_index, &res);
8564 	if (!i2c) {
8565 		DRM_ERROR("Failed to create i2c adapter data\n");
8566 		return -ENOMEM;
8567 	}
8568 
8569 	aconnector->i2c = i2c;
8570 	res = i2c_add_adapter(&i2c->base);
8571 
8572 	if (res) {
8573 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8574 		goto out_free;
8575 	}
8576 
8577 	connector_type = to_drm_connector_type(link->connector_signal);
8578 
8579 	res = drm_connector_init_with_ddc(
8580 			dm->ddev,
8581 			&aconnector->base,
8582 			&amdgpu_dm_connector_funcs,
8583 			connector_type,
8584 			&i2c->base);
8585 
8586 	if (res) {
8587 		DRM_ERROR("connector_init failed\n");
8588 		aconnector->connector_id = -1;
8589 		goto out_free;
8590 	}
8591 
8592 	drm_connector_helper_add(
8593 			&aconnector->base,
8594 			&amdgpu_dm_connector_helper_funcs);
8595 
8596 	amdgpu_dm_connector_init_helper(
8597 		dm,
8598 		aconnector,
8599 		connector_type,
8600 		link,
8601 		link_index);
8602 
8603 	drm_connector_attach_encoder(
8604 		&aconnector->base, &aencoder->base);
8605 
8606 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8607 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8608 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8609 
8610 out_free:
8611 	if (res) {
8612 		kfree(i2c);
8613 		aconnector->i2c = NULL;
8614 	}
8615 	return res;
8616 }
8617 
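/*
 * Bitmask of CRTCs an encoder can be attached to: one bit per CRTC,
 * capped at six CRTCs.
 */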
8618 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8619 {
8620 	switch (adev->mode_info.num_crtc) {
8621 	case 1:
8622 		return 0x1;
8623 	case 2:
8624 		return 0x3;
8625 	case 3:
8626 		return 0x7;
8627 	case 4:
8628 		return 0xf;
8629 	case 5:
8630 		return 0x1f;
8631 	case 6:
8632 	default:
8633 		return 0x3f;
8634 	}
8635 }
8636 
8637 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8638 				  struct amdgpu_encoder *aencoder,
8639 				  uint32_t link_index)
8640 {
8641 	struct amdgpu_device *adev = drm_to_adev(dev);
8642 
8643 	int res = drm_encoder_init(dev,
8644 				   &aencoder->base,
8645 				   &amdgpu_dm_encoder_funcs,
8646 				   DRM_MODE_ENCODER_TMDS,
8647 				   NULL);
8648 
8649 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8650 
8651 	if (!res)
8652 		aencoder->encoder_id = link_index;
8653 	else
8654 		aencoder->encoder_id = -1;
8655 
8656 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8657 
8658 	return res;
8659 }
8660 
8661 static void manage_dm_interrupts(struct amdgpu_device *adev,
8662 				 struct amdgpu_crtc *acrtc,
8663 				 bool enable)
8664 {
8665 	/*
8666 	 * We have no guarantee that the frontend index maps to the same
8667 	 * backend index - some even map to more than one.
8668 	 *
8669 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8670 	 */
8671 	int irq_type =
8672 		amdgpu_display_crtc_idx_to_irq_type(
8673 			adev,
8674 			acrtc->crtc_id);
8675 
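	/*
	 * Enable vblank before taking the pageflip (and vline0) IRQ
	 * references; tear down in the reverse order when disabling.
	 */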
8676 	if (enable) {
8677 		drm_crtc_vblank_on(&acrtc->base);
8678 		amdgpu_irq_get(
8679 			adev,
8680 			&adev->pageflip_irq,
8681 			irq_type);
8682 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8683 		amdgpu_irq_get(
8684 			adev,
8685 			&adev->vline0_irq,
8686 			irq_type);
8687 #endif
8688 	} else {
8689 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8690 		amdgpu_irq_put(
8691 			adev,
8692 			&adev->vline0_irq,
8693 			irq_type);
8694 #endif
8695 		amdgpu_irq_put(
8696 			adev,
8697 			&adev->pageflip_irq,
8698 			irq_type);
8699 		drm_crtc_vblank_off(&acrtc->base);
8700 	}
8701 }
8702 
8703 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8704 				      struct amdgpu_crtc *acrtc)
8705 {
8706 	int irq_type =
8707 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8708 
	/**
	 * This reads the current state for the IRQ and forcibly reapplies
	 * the setting to hardware.
	 */
8713 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8714 }
8715 
8716 static bool
8717 is_scaling_state_different(const struct dm_connector_state *dm_state,
8718 			   const struct dm_connector_state *old_dm_state)
8719 {
8720 	if (dm_state->scaling != old_dm_state->scaling)
8721 		return true;
8722 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8723 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8724 			return true;
8725 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8726 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8727 			return true;
8728 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8729 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8730 		return true;
8731 	return false;
8732 }
8733 
8734 #ifdef CONFIG_DRM_AMD_DC_HDCP
8735 static bool is_content_protection_different(struct drm_connector_state *state,
8736 					    const struct drm_connector_state *old_state,
8737 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8738 {
8739 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8740 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8741 
8742 	/* Handle: Type0/1 change */
8743 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8744 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8745 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8746 		return true;
8747 	}
8748 
	/* CP is being re-enabled, ignore this
8750 	 *
8751 	 * Handles:	ENABLED -> DESIRED
8752 	 */
8753 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8754 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8755 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8756 		return false;
8757 	}
8758 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) while the restored state will be ENABLED
8760 	 *
8761 	 * Handles:	UNDESIRED -> ENABLED
8762 	 */
8763 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8764 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8765 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8766 
8767 	/* Stream removed and re-enabled
8768 	 *
8769 	 * Can sometimes overlap with the HPD case,
8770 	 * thus set update_hdcp to false to avoid
8771 	 * setting HDCP multiple times.
8772 	 *
8773 	 * Handles:	DESIRED -> DESIRED (Special case)
8774 	 */
8775 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8776 		state->crtc && state->crtc->enabled &&
8777 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8778 		dm_con_state->update_hdcp = false;
8779 		return true;
8780 	}
8781 
	/* Hot-plug, headless S3, DPMS
8783 	 *
8784 	 * Only start HDCP if the display is connected/enabled.
8785 	 * update_hdcp flag will be set to false until the next
8786 	 * HPD comes in.
8787 	 *
8788 	 * Handles:	DESIRED -> DESIRED (Special case)
8789 	 */
8790 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8791 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8792 		dm_con_state->update_hdcp = false;
8793 		return true;
8794 	}
8795 
8796 	/*
8797 	 * Handles:	UNDESIRED -> UNDESIRED
8798 	 *		DESIRED -> DESIRED
8799 	 *		ENABLED -> ENABLED
8800 	 */
8801 	if (old_state->content_protection == state->content_protection)
8802 		return false;
8803 
8804 	/*
8805 	 * Handles:	UNDESIRED -> DESIRED
8806 	 *		DESIRED -> UNDESIRED
8807 	 *		ENABLED -> UNDESIRED
8808 	 */
8809 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8810 		return true;
8811 
8812 	/*
8813 	 * Handles:	DESIRED -> ENABLED
8814 	 */
8815 	return false;
8816 }
8817 
8818 #endif
8819 static void remove_stream(struct amdgpu_device *adev,
8820 			  struct amdgpu_crtc *acrtc,
8821 			  struct dc_stream_state *stream)
8822 {
8823 	/* this is the update mode case */
8824 
8825 	acrtc->otg_inst = -1;
8826 	acrtc->enabled = false;
8827 }
8828 
8829 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8830 			       struct dc_cursor_position *position)
8831 {
8832 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8833 	int x, y;
8834 	int xorigin = 0, yorigin = 0;
8835 
8836 	if (!crtc || !plane->state->fb)
8837 		return 0;
8838 
8839 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8840 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8841 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8842 			  __func__,
8843 			  plane->state->crtc_w,
8844 			  plane->state->crtc_h);
8845 		return -EINVAL;
8846 	}
8847 
8848 	x = plane->state->crtc_x;
8849 	y = plane->state->crtc_y;
8850 
8851 	if (x <= -amdgpu_crtc->max_cursor_width ||
8852 	    y <= -amdgpu_crtc->max_cursor_height)
8853 		return 0;
8854 
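	/*
	 * For cursors partially off the top/left edge, clamp the position to
	 * the CRTC origin and shift the hotspot instead.
	 */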
8855 	if (x < 0) {
8856 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8857 		x = 0;
8858 	}
8859 	if (y < 0) {
8860 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8861 		y = 0;
8862 	}
8863 	position->enable = true;
8864 	position->translate_by_source = true;
8865 	position->x = x;
8866 	position->y = y;
8867 	position->x_hotspot = xorigin;
8868 	position->y_hotspot = yorigin;
8869 
8870 	return 0;
8871 }
8872 
8873 static void handle_cursor_update(struct drm_plane *plane,
8874 				 struct drm_plane_state *old_plane_state)
8875 {
8876 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8877 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8878 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8879 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8880 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8881 	uint64_t address = afb ? afb->address : 0;
8882 	struct dc_cursor_position position = {0};
8883 	struct dc_cursor_attributes attributes;
8884 	int ret;
8885 
8886 	if (!plane->state->fb && !old_plane_state->fb)
8887 		return;
8888 
8889 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8890 		      __func__,
8891 		      amdgpu_crtc->crtc_id,
8892 		      plane->state->crtc_w,
8893 		      plane->state->crtc_h);
8894 
8895 	ret = get_cursor_position(plane, crtc, &position);
8896 	if (ret)
8897 		return;
8898 
8899 	if (!position.enable) {
8900 		/* turn off cursor */
8901 		if (crtc_state && crtc_state->stream) {
8902 			mutex_lock(&adev->dm.dc_lock);
8903 			dc_stream_set_cursor_position(crtc_state->stream,
8904 						      &position);
8905 			mutex_unlock(&adev->dm.dc_lock);
8906 		}
8907 		return;
8908 	}
8909 
8910 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8911 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8912 
8913 	memset(&attributes, 0, sizeof(attributes));
8914 	attributes.address.high_part = upper_32_bits(address);
8915 	attributes.address.low_part  = lower_32_bits(address);
8916 	attributes.width             = plane->state->crtc_w;
8917 	attributes.height            = plane->state->crtc_h;
8918 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8919 	attributes.rotation_angle    = 0;
8920 	attributes.attribute_flags.value = 0;
8921 
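	/* DC expects the pitch in pixels, not bytes. */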
8922 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8923 
8924 	if (crtc_state->stream) {
8925 		mutex_lock(&adev->dm.dc_lock);
8926 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8927 							 &attributes))
8928 			DRM_ERROR("DC failed to set cursor attributes\n");
8929 
8930 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8931 						   &position))
8932 			DRM_ERROR("DC failed to set cursor position\n");
8933 		mutex_unlock(&adev->dm.dc_lock);
8934 	}
8935 }
8936 
8937 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8938 {
8939 
8940 	assert_spin_locked(&acrtc->base.dev->event_lock);
8941 	WARN_ON(acrtc->event);
8942 
8943 	acrtc->event = acrtc->base.state->event;
8944 
8945 	/* Set the flip status */
8946 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8947 
8948 	/* Mark this event as consumed */
8949 	acrtc->base.state->event = NULL;
8950 
8951 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8952 		     acrtc->crtc_id);
8953 }
8954 
8955 static void update_freesync_state_on_stream(
8956 	struct amdgpu_display_manager *dm,
8957 	struct dm_crtc_state *new_crtc_state,
8958 	struct dc_stream_state *new_stream,
8959 	struct dc_plane_state *surface,
8960 	u32 flip_timestamp_in_us)
8961 {
8962 	struct mod_vrr_params vrr_params;
8963 	struct dc_info_packet vrr_infopacket = {0};
8964 	struct amdgpu_device *adev = dm->adev;
8965 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8966 	unsigned long flags;
8967 	bool pack_sdp_v1_3 = false;
8968 
8969 	if (!new_stream)
8970 		return;
8971 
8972 	/*
8973 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8974 	 * For now it's sufficient to just guard against these conditions.
8975 	 */
8976 
8977 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8978 		return;
8979 
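	/*
	 * vrr_params lives in dm_irq_params, which is shared with the
	 * vblank/pflip IRQ handlers; hold the event lock while reading
	 * and updating it.
	 */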
8980 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8982 
8983 	if (surface) {
8984 		mod_freesync_handle_preflip(
8985 			dm->freesync_module,
8986 			surface,
8987 			new_stream,
8988 			flip_timestamp_in_us,
8989 			&vrr_params);
8990 
8991 		if (adev->family < AMDGPU_FAMILY_AI &&
8992 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8993 			mod_freesync_handle_v_update(dm->freesync_module,
8994 						     new_stream, &vrr_params);
8995 
8996 			/* Need to call this before the frame ends. */
8997 			dc_stream_adjust_vmin_vmax(dm->dc,
8998 						   new_crtc_state->stream,
8999 						   &vrr_params.adjust);
9000 		}
9001 	}
9002 
9003 	mod_freesync_build_vrr_infopacket(
9004 		dm->freesync_module,
9005 		new_stream,
9006 		&vrr_params,
9007 		PACKET_TYPE_VRR,
9008 		TRANSFER_FUNC_UNKNOWN,
9009 		&vrr_infopacket,
9010 		pack_sdp_v1_3);
9011 
9012 	new_crtc_state->freesync_timing_changed |=
9013 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9014 			&vrr_params.adjust,
9015 			sizeof(vrr_params.adjust)) != 0);
9016 
9017 	new_crtc_state->freesync_vrr_info_changed |=
9018 		(memcmp(&new_crtc_state->vrr_infopacket,
9019 			&vrr_infopacket,
9020 			sizeof(vrr_infopacket)) != 0);
9021 
9022 	acrtc->dm_irq_params.vrr_params = vrr_params;
9023 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9024 
9025 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9026 	new_stream->vrr_infopacket = vrr_infopacket;
9027 
9028 	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
9030 			      new_crtc_state->base.crtc->base.id,
9031 			      (int)new_crtc_state->base.vrr_enabled,
9032 			      (int)vrr_params.state);
9033 
9034 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9035 }
9036 
9037 static void update_stream_irq_parameters(
9038 	struct amdgpu_display_manager *dm,
9039 	struct dm_crtc_state *new_crtc_state)
9040 {
9041 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9042 	struct mod_vrr_params vrr_params;
9043 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9044 	struct amdgpu_device *adev = dm->adev;
9045 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9046 	unsigned long flags;
9047 
9048 	if (!new_stream)
9049 		return;
9050 
9051 	/*
9052 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9053 	 * For now it's sufficient to just guard against these conditions.
9054 	 */
9055 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9056 		return;
9057 
9058 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9059 	vrr_params = acrtc->dm_irq_params.vrr_params;
9060 
9061 	if (new_crtc_state->vrr_supported &&
9062 	    config.min_refresh_in_uhz &&
9063 	    config.max_refresh_in_uhz) {
		/*
		 * If a FreeSync-compatible mode was set, config.state will
		 * already have been set in atomic check.
		 */
9068 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9069 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9070 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9071 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9072 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9073 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9074 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9075 		} else {
9076 			config.state = new_crtc_state->base.vrr_enabled ?
9077 						     VRR_STATE_ACTIVE_VARIABLE :
9078 						     VRR_STATE_INACTIVE;
9079 		}
9080 	} else {
9081 		config.state = VRR_STATE_UNSUPPORTED;
9082 	}
9083 
9084 	mod_freesync_build_vrr_params(dm->freesync_module,
9085 				      new_stream,
9086 				      &config, &vrr_params);
9087 
9088 	new_crtc_state->freesync_timing_changed |=
9089 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9090 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9091 
9092 	new_crtc_state->freesync_config = config;
9093 	/* Copy state for access from DM IRQ handler */
9094 	acrtc->dm_irq_params.freesync_config = config;
9095 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9096 	acrtc->dm_irq_params.vrr_params = vrr_params;
9097 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9098 }
9099 
9100 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9101 					    struct dm_crtc_state *new_state)
9102 {
9103 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9104 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9105 
9106 	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if the disable happened inside the display
		 * front porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
9115 		dm_set_vupdate_irq(new_state->base.crtc, true);
9116 		drm_crtc_vblank_get(new_state->base.crtc);
9117 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9118 				 __func__, new_state->base.crtc->base.id);
9119 	} else if (old_vrr_active && !new_vrr_active) {
9120 		/* Transition VRR active -> inactive:
9121 		 * Allow vblank irq disable again for fixed refresh rate.
9122 		 */
9123 		dm_set_vupdate_irq(new_state->base.crtc, false);
9124 		drm_crtc_vblank_put(new_state->base.crtc);
9125 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9126 				 __func__, new_state->base.crtc->base.id);
9127 	}
9128 }
9129 
9130 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9131 {
9132 	struct drm_plane *plane;
9133 	struct drm_plane_state *old_plane_state;
9134 	int i;
9135 
9136 	/*
9137 	 * TODO: Make this per-stream so we don't issue redundant updates for
9138 	 * commits with multiple streams.
9139 	 */
9140 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9141 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9142 			handle_cursor_update(plane, old_plane_state);
9143 }
9144 
9145 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9146 				    struct dc_state *dc_state,
9147 				    struct drm_device *dev,
9148 				    struct amdgpu_display_manager *dm,
9149 				    struct drm_crtc *pcrtc,
9150 				    bool wait_for_vblank)
9151 {
9152 	uint32_t i;
9153 	uint64_t timestamp_ns;
9154 	struct drm_plane *plane;
9155 	struct drm_plane_state *old_plane_state, *new_plane_state;
9156 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9157 	struct drm_crtc_state *new_pcrtc_state =
9158 			drm_atomic_get_new_crtc_state(state, pcrtc);
9159 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9160 	struct dm_crtc_state *dm_old_crtc_state =
9161 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9162 	int planes_count = 0, vpos, hpos;
9163 	long r;
9164 	unsigned long flags;
9165 	struct amdgpu_bo *abo;
9166 	uint32_t target_vblank, last_flip_vblank;
9167 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9168 	bool pflip_present = false;
9169 	struct {
9170 		struct dc_surface_update surface_updates[MAX_SURFACES];
9171 		struct dc_plane_info plane_infos[MAX_SURFACES];
9172 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9173 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9174 		struct dc_stream_update stream_update;
9175 	} *bundle;
9176 
9177 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9178 
9179 	if (!bundle) {
9180 		dm_error("Failed to allocate update bundle\n");
9181 		goto cleanup;
9182 	}
9183 
9184 	/*
9185 	 * Disable the cursor first if we're disabling all the planes.
9186 	 * It'll remain on the screen after the planes are re-enabled
9187 	 * if we don't.
9188 	 */
9189 	if (acrtc_state->active_planes == 0)
9190 		amdgpu_dm_commit_cursors(state);
9191 
9192 	/* update planes when needed */
9193 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9194 		struct drm_crtc *crtc = new_plane_state->crtc;
9195 		struct drm_crtc_state *new_crtc_state;
9196 		struct drm_framebuffer *fb = new_plane_state->fb;
9197 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9198 		bool plane_needs_flip;
9199 		struct dc_plane_state *dc_plane;
9200 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9201 
9202 		/* Cursor plane is handled after stream updates */
9203 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9204 			continue;
9205 
9206 		if (!fb || !crtc || pcrtc != crtc)
9207 			continue;
9208 
9209 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9210 		if (!new_crtc_state->active)
9211 			continue;
9212 
9213 		dc_plane = dm_new_plane_state->dc_state;
9214 
9215 		bundle->surface_updates[planes_count].surface = dc_plane;
9216 		if (new_pcrtc_state->color_mgmt_changed) {
9217 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9218 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9219 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9220 		}
9221 
9222 		fill_dc_scaling_info(dm->adev, new_plane_state,
9223 				     &bundle->scaling_infos[planes_count]);
9224 
9225 		bundle->surface_updates[planes_count].scaling_info =
9226 			&bundle->scaling_infos[planes_count];
9227 
9228 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9229 
9230 		pflip_present = pflip_present || plane_needs_flip;
9231 
9232 		if (!plane_needs_flip) {
9233 			planes_count += 1;
9234 			continue;
9235 		}
9236 
9237 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9238 
9239 		/*
9240 		 * Wait for all fences on this FB. Do limited wait to avoid
9241 		 * deadlock during GPU reset when this fence will not signal
9242 		 * but we hold reservation lock for the BO.
9243 		 */
9244 		r = dma_resv_wait_timeout(abo->tbo.base.resv,
9245 					  DMA_RESV_USAGE_WRITE, false,
9246 					  msecs_to_jiffies(5000));
9247 		if (unlikely(r <= 0))
9248 			DRM_ERROR("Waiting for fences timed out!");
9249 
9250 		fill_dc_plane_info_and_addr(
9251 			dm->adev, new_plane_state,
9252 			afb->tiling_flags,
9253 			&bundle->plane_infos[planes_count],
9254 			&bundle->flip_addrs[planes_count].address,
9255 			afb->tmz_surface, false);
9256 
9257 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9258 				 new_plane_state->plane->index,
9259 				 bundle->plane_infos[planes_count].dcc.enable);
9260 
9261 		bundle->surface_updates[planes_count].plane_info =
9262 			&bundle->plane_infos[planes_count];
9263 
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
9268 		bundle->flip_addrs[planes_count].flip_immediate =
9269 			crtc->state->async_flip &&
9270 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9271 
9272 		timestamp_ns = ktime_get_ns();
9273 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9274 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9275 		bundle->surface_updates[planes_count].surface = dc_plane;
9276 
9277 		if (!bundle->surface_updates[planes_count].surface) {
9278 			DRM_ERROR("No surface for CRTC: id=%d\n",
9279 					acrtc_attach->crtc_id);
9280 			continue;
9281 		}
9282 
9283 		if (plane == pcrtc->primary)
9284 			update_freesync_state_on_stream(
9285 				dm,
9286 				acrtc_state,
9287 				acrtc_state->stream,
9288 				dc_plane,
9289 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9290 
9291 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9292 				 __func__,
9293 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9294 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9295 
9296 		planes_count += 1;
9297 
9298 	}
9299 
9300 	if (pflip_present) {
9301 		if (!vrr_active) {
9302 			/* Use old throttling in non-vrr fixed refresh rate mode
9303 			 * to keep flip scheduling based on target vblank counts
9304 			 * working in a backwards compatible way, e.g., for
9305 			 * clients using the GLX_OML_sync_control extension or
9306 			 * DRI3/Present extension with defined target_msc.
9307 			 */
9308 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9309 		}
9310 		else {
9311 			/* For variable refresh rate mode only:
9312 			 * Get vblank of last completed flip to avoid > 1 vrr
9313 			 * flips per video frame by use of throttling, but allow
9314 			 * flip programming anywhere in the possibly large
9315 			 * variable vrr vblank interval for fine-grained flip
9316 			 * timing control and more opportunity to avoid stutter
9317 			 * on late submission of flips.
9318 			 */
9319 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9320 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9321 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9322 		}
9323 
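		/*
		 * Target one vblank after the last completed flip when
		 * wait_for_vblank is set; otherwise do not throttle against
		 * the vblank counter.
		 */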
9324 		target_vblank = last_flip_vblank + wait_for_vblank;
9325 
9326 		/*
9327 		 * Wait until we're out of the vertical blank period before the one
9328 		 * targeted by the flip
9329 		 */
9330 		while ((acrtc_attach->enabled &&
9331 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9332 							    0, &vpos, &hpos, NULL,
9333 							    NULL, &pcrtc->hwmode)
9334 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9335 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9336 			(int)(target_vblank -
9337 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9338 			usleep_range(1000, 1100);
9339 		}
9340 
9341 		/**
9342 		 * Prepare the flip event for the pageflip interrupt to handle.
9343 		 *
9344 		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
9346 		 * from 0 -> n planes we have to skip a hardware generated event
9347 		 * and rely on sending it from software.
9348 		 */
9349 		if (acrtc_attach->base.state->event &&
9350 		    acrtc_state->active_planes > 0 &&
9351 		    !acrtc_state->force_dpms_off) {
9352 			drm_crtc_vblank_get(pcrtc);
9353 
9354 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9355 
9356 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9357 			prepare_flip_isr(acrtc_attach);
9358 
9359 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9360 		}
9361 
9362 		if (acrtc_state->stream) {
9363 			if (acrtc_state->freesync_vrr_info_changed)
9364 				bundle->stream_update.vrr_infopacket =
9365 					&acrtc_state->stream->vrr_infopacket;
9366 		}
9367 	}
9368 
9369 	/* Update the planes if changed or disable if we don't have any. */
9370 	if ((planes_count || acrtc_state->active_planes == 0) &&
9371 		acrtc_state->stream) {
9372 #if defined(CONFIG_DRM_AMD_DC_DCN)
9373 		/*
9374 		 * If PSR or idle optimizations are enabled then flush out
9375 		 * any pending work before hardware programming.
9376 		 */
9377 		if (dm->vblank_control_workqueue)
9378 			flush_workqueue(dm->vblank_control_workqueue);
9379 #endif
9380 
9381 		bundle->stream_update.stream = acrtc_state->stream;
9382 		if (new_pcrtc_state->mode_changed) {
9383 			bundle->stream_update.src = acrtc_state->stream->src;
9384 			bundle->stream_update.dst = acrtc_state->stream->dst;
9385 		}
9386 
9387 		if (new_pcrtc_state->color_mgmt_changed) {
9388 			/*
9389 			 * TODO: This isn't fully correct since we've actually
9390 			 * already modified the stream in place.
9391 			 */
9392 			bundle->stream_update.gamut_remap =
9393 				&acrtc_state->stream->gamut_remap_matrix;
9394 			bundle->stream_update.output_csc_transform =
9395 				&acrtc_state->stream->csc_color_matrix;
9396 			bundle->stream_update.out_transfer_func =
9397 				acrtc_state->stream->out_transfer_func;
9398 		}
9399 
9400 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9401 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9402 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9403 
9404 		/*
9405 		 * If FreeSync state on the stream has changed then we need to
9406 		 * re-adjust the min/max bounds now that DC doesn't handle this
9407 		 * as part of commit.
9408 		 */
9409 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9410 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9411 			dc_stream_adjust_vmin_vmax(
9412 				dm->dc, acrtc_state->stream,
9413 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9414 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9415 		}
9416 		mutex_lock(&dm->dc_lock);
9417 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9418 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9419 			amdgpu_dm_psr_disable(acrtc_state->stream);
9420 
9421 		dc_commit_updates_for_stream(dm->dc,
9422 						     bundle->surface_updates,
9423 						     planes_count,
9424 						     acrtc_state->stream,
9425 						     &bundle->stream_update,
9426 						     dc_state);
9427 
9428 		/**
9429 		 * Enable or disable the interrupts on the backend.
9430 		 *
9431 		 * Most pipes are put into power gating when unused.
9432 		 *
9433 		 * When power gating is enabled on a pipe we lose the
9434 		 * interrupt enablement state when power gating is disabled.
9435 		 *
9436 		 * So we need to update the IRQ control state in hardware
9437 		 * whenever the pipe turns on (since it could be previously
9438 		 * power gated) or off (since some pipes can't be power gated
9439 		 * on some ASICs).
9440 		 */
9441 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9442 			dm_update_pflip_irq_state(drm_to_adev(dev),
9443 						  acrtc_attach);
9444 
9445 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9446 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9447 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9448 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9449 
9450 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9451 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9452 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9453 			struct amdgpu_dm_connector *aconn =
9454 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9455 
9456 			if (aconn->psr_skip_count > 0)
9457 				aconn->psr_skip_count--;
9458 
9459 			/* Allow PSR when skip count is 0. */
9460 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9461 		} else {
9462 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9463 		}
9464 
9465 		mutex_unlock(&dm->dc_lock);
9466 	}
9467 
9468 	/*
9469 	 * Update cursor state *after* programming all the planes.
9470 	 * This avoids redundant programming in the case where we're going
9471 	 * to be disabling a single plane - those pipes are being disabled.
9472 	 */
9473 	if (acrtc_state->active_planes)
9474 		amdgpu_dm_commit_cursors(state);
9475 
9476 cleanup:
9477 	kfree(bundle);
9478 }
9479 
9480 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9481 				   struct drm_atomic_state *state)
9482 {
9483 	struct amdgpu_device *adev = drm_to_adev(dev);
9484 	struct amdgpu_dm_connector *aconnector;
9485 	struct drm_connector *connector;
9486 	struct drm_connector_state *old_con_state, *new_con_state;
9487 	struct drm_crtc_state *new_crtc_state;
9488 	struct dm_crtc_state *new_dm_crtc_state;
9489 	const struct dc_stream_status *status;
9490 	int i, inst;
9491 
9492 	/* Notify device removals. */
9493 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9494 		if (old_con_state->crtc != new_con_state->crtc) {
9495 			/* CRTC changes require notification. */
9496 			goto notify;
9497 		}
9498 
9499 		if (!new_con_state->crtc)
9500 			continue;
9501 
9502 		new_crtc_state = drm_atomic_get_new_crtc_state(
9503 			state, new_con_state->crtc);
9504 
9505 		if (!new_crtc_state)
9506 			continue;
9507 
9508 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9509 			continue;
9510 
9511 	notify:
9512 		aconnector = to_amdgpu_dm_connector(connector);
9513 
9514 		mutex_lock(&adev->dm.audio_lock);
9515 		inst = aconnector->audio_inst;
9516 		aconnector->audio_inst = -1;
9517 		mutex_unlock(&adev->dm.audio_lock);
9518 
9519 		amdgpu_dm_audio_eld_notify(adev, inst);
9520 	}
9521 
9522 	/* Notify audio device additions. */
9523 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9524 		if (!new_con_state->crtc)
9525 			continue;
9526 
9527 		new_crtc_state = drm_atomic_get_new_crtc_state(
9528 			state, new_con_state->crtc);
9529 
9530 		if (!new_crtc_state)
9531 			continue;
9532 
9533 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9534 			continue;
9535 
9536 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9537 		if (!new_dm_crtc_state->stream)
9538 			continue;
9539 
9540 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9541 		if (!status)
9542 			continue;
9543 
9544 		aconnector = to_amdgpu_dm_connector(connector);
9545 
9546 		mutex_lock(&adev->dm.audio_lock);
9547 		inst = status->audio_inst;
9548 		aconnector->audio_inst = inst;
9549 		mutex_unlock(&adev->dm.audio_lock);
9550 
9551 		amdgpu_dm_audio_eld_notify(adev, inst);
9552 	}
9553 }
9554 
/**
9556  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9557  * @crtc_state: the DRM CRTC state
9558  * @stream_state: the DC stream state.
9559  *
9560  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9561  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9562  */
9563 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9564 						struct dc_stream_state *stream_state)
9565 {
9566 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9567 }
9568 
9569 /**
9570  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9571  * @state: The atomic state to commit
9572  *
9573  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
9575  * atomic check should have filtered anything non-kosher.
9576  */
9577 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9578 {
9579 	struct drm_device *dev = state->dev;
9580 	struct amdgpu_device *adev = drm_to_adev(dev);
9581 	struct amdgpu_display_manager *dm = &adev->dm;
9582 	struct dm_atomic_state *dm_state;
9583 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9584 	uint32_t i, j;
9585 	struct drm_crtc *crtc;
9586 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9587 	unsigned long flags;
9588 	bool wait_for_vblank = true;
9589 	struct drm_connector *connector;
9590 	struct drm_connector_state *old_con_state, *new_con_state;
9591 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9592 	int crtc_disable_count = 0;
9593 	bool mode_set_reset_required = false;
9594 
9595 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9596 
9597 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9598 
9599 	dm_state = dm_atomic_get_new_state(state);
9600 	if (dm_state && dm_state->context) {
9601 		dc_state = dm_state->context;
9602 	} else {
9603 		/* No state changes, retain current state. */
9604 		dc_state_temp = dc_create_state(dm->dc);
9605 		ASSERT(dc_state_temp);
9606 		dc_state = dc_state_temp;
9607 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9608 	}
9609 
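	/*
	 * Disable interrupts and drop the stream reference for every CRTC
	 * that is being turned off or is undergoing a full modeset, before
	 * programming the new state below.
	 */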
9610 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9611 				       new_crtc_state, i) {
9612 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9613 
9614 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9615 
9616 		if (old_crtc_state->active &&
9617 		    (!new_crtc_state->active ||
9618 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9619 			manage_dm_interrupts(adev, acrtc, false);
9620 			dc_stream_release(dm_old_crtc_state->stream);
9621 		}
9622 	}
9623 
9624 	drm_atomic_helper_calc_timestamping_constants(state);
9625 
9626 	/* update changed items */
9627 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9628 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9629 
9630 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9631 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9632 
9633 		drm_dbg_state(state->dev,
9634 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9635 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9636 			"connectors_changed:%d\n",
9637 			acrtc->crtc_id,
9638 			new_crtc_state->enable,
9639 			new_crtc_state->active,
9640 			new_crtc_state->planes_changed,
9641 			new_crtc_state->mode_changed,
9642 			new_crtc_state->active_changed,
9643 			new_crtc_state->connectors_changed);
9644 
9645 		/* Disable cursor if disabling crtc */
9646 		if (old_crtc_state->active && !new_crtc_state->active) {
9647 			struct dc_cursor_position position;
9648 
9649 			memset(&position, 0, sizeof(position));
9650 			mutex_lock(&dm->dc_lock);
9651 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9652 			mutex_unlock(&dm->dc_lock);
9653 		}
9654 
9655 		/* Copy all transient state flags into dc state */
9656 		if (dm_new_crtc_state->stream) {
9657 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9658 							    dm_new_crtc_state->stream);
9659 		}
9660 
9661 		/* handles headless hotplug case, updating new_state and
9662 		 * aconnector as needed
9663 		 */
9664 
9665 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9666 
9667 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9668 
9669 			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display which is in
				 * fact disconnected, so dc_sink is NULL on the
				 * aconnector. We expect a mode reset to come
				 * soon.
				 *
				 * This can also happen when an unplug occurs
				 * while the resume sequence is still finishing.
				 *
				 * In either case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
9685 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9686 						__func__, acrtc->base.base.id);
9687 				continue;
9688 			}
9689 
9690 			if (dm_old_crtc_state->stream)
9691 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9692 
9693 			pm_runtime_get_noresume(dev->dev);
9694 
9695 			acrtc->enabled = true;
9696 			acrtc->hw_mode = new_crtc_state->mode;
9697 			crtc->hwmode = new_crtc_state->mode;
9698 			mode_set_reset_required = true;
9699 		} else if (modereset_required(new_crtc_state)) {
9700 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9701 			/* i.e. reset mode */
9702 			if (dm_old_crtc_state->stream)
9703 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9704 
9705 			mode_set_reset_required = true;
9706 		}
9707 	} /* for_each_crtc_in_state() */
9708 
9709 	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
9711 		if (mode_set_reset_required) {
9712 #if defined(CONFIG_DRM_AMD_DC_DCN)
9713 			if (dm->vblank_control_workqueue)
9714 				flush_workqueue(dm->vblank_control_workqueue);
9715 #endif
9716 			amdgpu_dm_psr_disable_all(dm);
9717 		}
9718 
9719 		dm_enable_per_frame_crtc_master_sync(dc_state);
9720 		mutex_lock(&dm->dc_lock);
9721 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9722 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
9726 #endif
9727 		mutex_unlock(&dm->dc_lock);
9728 	}
9729 
9730 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9731 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9732 
9733 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9734 
9735 		if (dm_new_crtc_state->stream != NULL) {
9736 			const struct dc_stream_status *status =
9737 					dc_stream_get_status(dm_new_crtc_state->stream);
9738 
9739 			if (!status)
9740 				status = dc_stream_get_status_from_state(dc_state,
9741 									 dm_new_crtc_state->stream);
9742 			if (!status)
9743 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9744 			else
9745 				acrtc->otg_inst = status->primary_otg_inst;
9746 		}
9747 	}
9748 #ifdef CONFIG_DRM_AMD_DC_HDCP
9749 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9750 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9751 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9752 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9753 
9754 		new_crtc_state = NULL;
9755 
9756 		if (acrtc)
9757 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9758 
9759 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9760 
9761 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9762 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9763 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9764 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9765 			dm_new_con_state->update_hdcp = true;
9766 			continue;
9767 		}
9768 
9769 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9770 			hdcp_update_display(
9771 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9772 				new_con_state->hdcp_content_type,
9773 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9774 	}
9775 #endif
9776 
9777 	/* Handle connector state changes */
9778 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9779 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9780 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9781 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9782 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9783 		struct dc_stream_update stream_update;
9784 		struct dc_info_packet hdr_packet;
9785 		struct dc_stream_status *status = NULL;
9786 		bool abm_changed, hdr_changed, scaling_changed;
9787 
9788 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9789 		memset(&stream_update, 0, sizeof(stream_update));
9790 
9791 		if (acrtc) {
9792 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9793 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9794 		}
9795 
9796 		/* Skip any modesets/resets */
9797 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9798 			continue;
9799 
9800 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9801 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9802 
9803 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9804 							     dm_old_con_state);
9805 
9806 		abm_changed = dm_new_crtc_state->abm_level !=
9807 			      dm_old_crtc_state->abm_level;
9808 
9809 		hdr_changed =
9810 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9811 
9812 		if (!scaling_changed && !abm_changed && !hdr_changed)
9813 			continue;
9814 
9815 		stream_update.stream = dm_new_crtc_state->stream;
9816 		if (scaling_changed) {
9817 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9818 					dm_new_con_state, dm_new_crtc_state->stream);
9819 
9820 			stream_update.src = dm_new_crtc_state->stream->src;
9821 			stream_update.dst = dm_new_crtc_state->stream->dst;
9822 		}
9823 
9824 		if (abm_changed) {
9825 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9826 
9827 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9828 		}
9829 
9830 		if (hdr_changed) {
9831 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9832 			stream_update.hdr_static_metadata = &hdr_packet;
9833 		}
9834 
9835 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9836 
9837 		if (WARN_ON(!status))
9838 			continue;
9839 
9840 		WARN_ON(!status->plane_count);
9841 
9842 		/*
9843 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9844 		 * Here we create an empty update on each plane.
9845 		 * To fix this, DC should permit updating only stream properties.
9846 		 */
9847 		for (j = 0; j < status->plane_count; j++)
9848 			dummy_updates[j].surface = status->plane_states[0];
9849 
9850 
9851 		mutex_lock(&dm->dc_lock);
9852 		dc_commit_updates_for_stream(dm->dc,
9853 						     dummy_updates,
9854 						     status->plane_count,
9855 						     dm_new_crtc_state->stream,
9856 						     &stream_update,
9857 						     dc_state);
9858 		mutex_unlock(&dm->dc_lock);
9859 	}
9860 
9861 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9862 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9863 				      new_crtc_state, i) {
9864 		if (old_crtc_state->active && !new_crtc_state->active)
9865 			crtc_disable_count++;
9866 
9867 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9868 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9869 
9870 		/* For freesync config update on crtc state and params for irq */
9871 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9872 
9873 		/* Handle vrr on->off / off->on transitions */
9874 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9875 						dm_new_crtc_state);
9876 	}
9877 
	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This is intentionally deferred until after the front end
	 * state has been modified, so that the OTG is on and the IRQ handlers
	 * don't access stale or invalid state.
	 */
9884 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9885 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9886 #ifdef CONFIG_DEBUG_FS
9887 		bool configure_crc = false;
9888 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9889 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9890 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9891 #endif
9892 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9893 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9894 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9895 #endif
9896 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9897 
9898 		if (new_crtc_state->active &&
9899 		    (!old_crtc_state->active ||
9900 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9901 			dc_stream_retain(dm_new_crtc_state->stream);
9902 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9903 			manage_dm_interrupts(adev, acrtc, true);
9904 
9905 #ifdef CONFIG_DEBUG_FS
9906 			/**
9907 			 * Frontend may have changed so reapply the CRC capture
9908 			 * settings for the stream.
9909 			 */
9910 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9911 
9912 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9913 				configure_crc = true;
9914 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9915 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9916 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9917 					acrtc->dm_irq_params.crc_window.update_win = true;
9918 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9919 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9920 					crc_rd_wrk->crtc = crtc;
9921 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9922 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9923 				}
9924 #endif
9925 			}
9926 
9927 			if (configure_crc)
9928 				if (amdgpu_dm_crtc_configure_crc_source(
9929 					crtc, dm_new_crtc_state, cur_crc_src))
					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9931 #endif
9932 		}
9933 	}
9934 
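	/* Skip the vblank wait if any CRTC requested an async (immediate) flip. */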
9935 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9936 		if (new_crtc_state->async_flip)
9937 			wait_for_vblank = false;
9938 
9939 	/* update planes when needed per crtc*/
9940 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9941 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9942 
9943 		if (dm_new_crtc_state->stream)
9944 			amdgpu_dm_commit_planes(state, dc_state, dev,
9945 						dm, crtc, wait_for_vblank);
9946 	}
9947 
9948 	/* Update audio instances for each connector. */
9949 	amdgpu_dm_commit_audio(dev, state);
9950 
9951 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9952 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9953 	/* restore the backlight level */
9954 	for (i = 0; i < dm->num_of_edps; i++) {
9955 		if (dm->backlight_dev[i] &&
9956 		    (dm->actual_brightness[i] != dm->brightness[i]))
9957 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9958 	}
9959 #endif
9960 	/*
9961 	 * send vblank event on all events not handled in flip and
9962 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9963 	 */
9964 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9965 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9966 
9967 		if (new_crtc_state->event)
9968 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9969 
9970 		new_crtc_state->event = NULL;
9971 	}
9972 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9973 
9974 	/* Signal HW programming completion */
9975 	drm_atomic_helper_commit_hw_done(state);
9976 
9977 	if (wait_for_vblank)
9978 		drm_atomic_helper_wait_for_flip_done(dev, state);
9979 
9980 	drm_atomic_helper_cleanup_planes(dev, state);
9981 
9982 	/* return the stolen vga memory back to VRAM */
9983 	if (!adev->mman.keep_stolen_vga_memory)
9984 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9985 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9986 
9987 	/*
9988 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9989 	 * so we can put the GPU into runtime suspend if we're not driving any
9990 	 * displays anymore
9991 	 */
9992 	for (i = 0; i < crtc_disable_count; i++)
9993 		pm_runtime_put_autosuspend(dev->dev);
9994 	pm_runtime_mark_last_busy(dev->dev);
9995 
9996 	if (dc_state_temp)
9997 		dc_release_state(dc_state_temp);
9998 }
9999 
10000 
10001 static int dm_force_atomic_commit(struct drm_connector *connector)
10002 {
10003 	int ret = 0;
10004 	struct drm_device *ddev = connector->dev;
10005 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10006 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10007 	struct drm_plane *plane = disconnected_acrtc->base.primary;
10008 	struct drm_connector_state *conn_state;
10009 	struct drm_crtc_state *crtc_state;
10010 	struct drm_plane_state *plane_state;
10011 
10012 	if (!state)
10013 		return -ENOMEM;
10014 
10015 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10016 
	/* Construct an atomic state to restore previous display settings */
10018 
10019 	/*
10020 	 * Attach connectors to drm_atomic_state
10021 	 */
10022 	conn_state = drm_atomic_get_connector_state(state, connector);
10023 
10024 	ret = PTR_ERR_OR_ZERO(conn_state);
10025 	if (ret)
10026 		goto out;
10027 
	/* Attach CRTC to drm_atomic_state */
10029 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10030 
10031 	ret = PTR_ERR_OR_ZERO(crtc_state);
10032 	if (ret)
10033 		goto out;
10034 
10035 	/* force a restore */
10036 	crtc_state->mode_changed = true;
10037 
10038 	/* Attach plane to drm_atomic_state */
10039 	plane_state = drm_atomic_get_plane_state(state, plane);
10040 
10041 	ret = PTR_ERR_OR_ZERO(plane_state);
10042 	if (ret)
10043 		goto out;
10044 
10045 	/* Call commit internally with the state we just constructed */
10046 	ret = drm_atomic_commit(state);
10047 
10048 out:
10049 	drm_atomic_state_put(state);
10050 	if (ret)
10051 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10052 
10053 	return ret;
10054 }
10055 
10056 /*
10057  * This function handles all cases when set mode does not come upon hotplug.
10058  * This includes when a display is unplugged then plugged back into the
 * same port and when running without userspace desktop manager support.
10060  */
10061 void dm_restore_drm_connector_state(struct drm_device *dev,
10062 				    struct drm_connector *connector)
10063 {
10064 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10065 	struct amdgpu_crtc *disconnected_acrtc;
10066 	struct dm_crtc_state *acrtc_state;
10067 
10068 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10069 		return;
10070 
10071 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10072 	if (!disconnected_acrtc)
10073 		return;
10074 
10075 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10076 	if (!acrtc_state->stream)
10077 		return;
10078 
10079 	/*
10080 	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on a userspace call
	 * to turn on the display, so we do it here
10083 	 */
10084 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10085 		dm_force_atomic_commit(&aconnector->base);
10086 }
10087 
10088 /*
10089  * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
10091  */
10092 static int do_aquire_global_lock(struct drm_device *dev,
10093 				 struct drm_atomic_state *state)
10094 {
10095 	struct drm_crtc *crtc;
10096 	struct drm_crtc_commit *commit;
10097 	long ret;
10098 
10099 	/*
	 * Adding all modeset locks to the acquire_ctx ensures that when the
	 * framework releases it, the extra locks we are taking here will get
	 * released too.
10103 	 */
10104 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10105 	if (ret)
10106 		return ret;
10107 
10108 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
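		/*
		 * Grab the commit at the head of this CRTC's commit list and
		 * hold a reference so it cannot be freed while we wait for its
		 * HW programming and page flip to finish.
		 */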
10109 		spin_lock(&crtc->commit_lock);
10110 		commit = list_first_entry_or_null(&crtc->commit_list,
10111 				struct drm_crtc_commit, commit_entry);
10112 		if (commit)
10113 			drm_crtc_commit_get(commit);
10114 		spin_unlock(&crtc->commit_lock);
10115 
10116 		if (!commit)
10117 			continue;
10118 
10119 		/*
10120 		 * Make sure all pending HW programming completed and
10121 		 * page flips done
10122 		 */
10123 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10124 
10125 		if (ret > 0)
10126 			ret = wait_for_completion_interruptible_timeout(
10127 					&commit->flip_done, 10*HZ);
10128 
10129 		if (ret == 0)
10130 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10131 				  "timed out\n", crtc->base.id, crtc->name);
10132 
10133 		drm_crtc_commit_put(commit);
10134 	}
10135 
10136 	return ret < 0 ? ret : 0;
10137 }
10138 
10139 static void get_freesync_config_for_crtc(
10140 	struct dm_crtc_state *new_crtc_state,
10141 	struct dm_connector_state *new_con_state)
10142 {
10143 	struct mod_freesync_config config = {0};
10144 	struct amdgpu_dm_connector *aconnector =
10145 			to_amdgpu_dm_connector(new_con_state->base.connector);
10146 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10147 	int vrefresh = drm_mode_vrefresh(mode);
10148 	bool fs_vid_mode = false;
10149 
10150 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10151 					vrefresh >= aconnector->min_vfreq &&
10152 					vrefresh <= aconnector->max_vfreq;
10153 
10154 	if (new_crtc_state->vrr_supported) {
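		/*
		 * Let the stream ignore the MSA timing parameters so the sink
		 * can follow the varying VRR timing; DC expects the freesync
		 * range in uHz.
		 */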
10155 		new_crtc_state->stream->ignore_msa_timing_param = true;
10156 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10157 
10158 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10159 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10160 		config.vsif_supported = true;
10161 		config.btr = true;
10162 
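		/*
		 * Pick the VRR state: fixed refresh for freesync video modes,
		 * variable refresh when userspace enabled VRR, otherwise leave
		 * it inactive (but still supported).
		 */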
10163 		if (fs_vid_mode) {
10164 			config.state = VRR_STATE_ACTIVE_FIXED;
10165 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10166 			goto out;
10167 		} else if (new_crtc_state->base.vrr_enabled) {
10168 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10169 		} else {
10170 			config.state = VRR_STATE_INACTIVE;
10171 		}
10172 	}
10173 out:
10174 	new_crtc_state->freesync_config = config;
10175 }
10176 
10177 static void reset_freesync_config_for_crtc(
10178 	struct dm_crtc_state *new_crtc_state)
10179 {
10180 	new_crtc_state->vrr_supported = false;
10181 
10182 	memset(&new_crtc_state->vrr_infopacket, 0,
10183 	       sizeof(new_crtc_state->vrr_infopacket));
10184 }
10185 
10186 static bool
10187 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10188 				 struct drm_crtc_state *new_crtc_state)
10189 {
10190 	const struct drm_display_mode *old_mode, *new_mode;
10191 
10192 	if (!old_crtc_state || !new_crtc_state)
10193 		return false;
10194 
10195 	old_mode = &old_crtc_state->mode;
10196 	new_mode = &new_crtc_state->mode;
10197 
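	/*
	 * Treat the timing as "unchanged" for freesync purposes when only the
	 * vertical blanking moved: clock, active area, horizontal timing and
	 * the vertical sync pulse width are identical and only vtotal and the
	 * vsync position differ, i.e. a vertical front porch adjustment.
	 */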
10198 	if (old_mode->clock       == new_mode->clock &&
10199 	    old_mode->hdisplay    == new_mode->hdisplay &&
10200 	    old_mode->vdisplay    == new_mode->vdisplay &&
10201 	    old_mode->htotal      == new_mode->htotal &&
10202 	    old_mode->vtotal      != new_mode->vtotal &&
10203 	    old_mode->hsync_start == new_mode->hsync_start &&
10204 	    old_mode->vsync_start != new_mode->vsync_start &&
10205 	    old_mode->hsync_end   == new_mode->hsync_end &&
10206 	    old_mode->vsync_end   != new_mode->vsync_end &&
10207 	    old_mode->hskew       == new_mode->hskew &&
10208 	    old_mode->vscan       == new_mode->vscan &&
10209 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10210 	    (new_mode->vsync_end - new_mode->vsync_start))
10211 		return true;
10212 
10213 	return false;
10214 }
10215 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
	uint64_t num, den, res;
10218 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10219 
10220 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10221 
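	/*
	 * Fixed refresh rate in uHz: the mode clock is in kHz, so clock * 1000
	 * gives Hz, and the extra factor of 1000000 converts the refresh rate
	 * clock / (htotal * vtotal) into micro-hertz.
	 */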
10222 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10223 	den = (unsigned long long)new_crtc_state->mode.htotal *
10224 	      (unsigned long long)new_crtc_state->mode.vtotal;
10225 
10226 	res = div_u64(num, den);
10227 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10228 }
10229 
10230 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10231 			 struct drm_atomic_state *state,
10232 			 struct drm_crtc *crtc,
10233 			 struct drm_crtc_state *old_crtc_state,
10234 			 struct drm_crtc_state *new_crtc_state,
10235 			 bool enable,
10236 			 bool *lock_and_validation_needed)
10237 {
10238 	struct dm_atomic_state *dm_state = NULL;
10239 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10240 	struct dc_stream_state *new_stream;
10241 	int ret = 0;
10242 
10243 	/*
10244 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10245 	 * update changed items
10246 	 */
10247 	struct amdgpu_crtc *acrtc = NULL;
10248 	struct amdgpu_dm_connector *aconnector = NULL;
10249 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10250 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10251 
10252 	new_stream = NULL;
10253 
10254 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10255 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10256 	acrtc = to_amdgpu_crtc(crtc);
10257 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10258 
10259 	/* TODO This hack should go away */
10260 	if (aconnector && enable) {
10261 		/* Make sure fake sink is created in plug-in scenario */
10262 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10263 							    &aconnector->base);
10264 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10265 							    &aconnector->base);
10266 
10267 		if (IS_ERR(drm_new_conn_state)) {
10268 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10269 			goto fail;
10270 		}
10271 
10272 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10273 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10274 
10275 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10276 			goto skip_modeset;
10277 
10278 		new_stream = create_validate_stream_for_sink(aconnector,
10279 							     &new_crtc_state->mode,
10280 							     dm_new_conn_state,
10281 							     dm_old_crtc_state->stream);
10282 
10283 		/*
10284 		 * we can have no stream on ACTION_SET if a display
10285 		 * was disconnected during S3, in this case it is not an
10286 		 * error, the OS will be updated after detection, and
10287 		 * will do the right thing on next atomic commit
10288 		 */
10289 
10290 		if (!new_stream) {
10291 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10292 					__func__, acrtc->base.base.id);
10293 			ret = -ENOMEM;
10294 			goto fail;
10295 		}
10296 
10297 		/*
10298 		 * TODO: Check VSDB bits to decide whether this should
10299 		 * be enabled or not.
10300 		 */
10301 		new_stream->triggered_crtc_reset.enabled =
10302 			dm->force_timing_sync;
10303 
10304 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10305 
10306 		ret = fill_hdr_info_packet(drm_new_conn_state,
10307 					   &new_stream->hdr_static_metadata);
10308 		if (ret)
10309 			goto fail;
10310 
10311 		/*
10312 		 * If we already removed the old stream from the context
10313 		 * (and set the new stream to NULL) then we can't reuse
10314 		 * the old stream even if the stream and scaling are unchanged.
10315 		 * We'll hit the BUG_ON and black screen.
10316 		 *
10317 		 * TODO: Refactor this function to allow this check to work
10318 		 * in all conditions.
10319 		 */
10320 		if (dm_new_crtc_state->stream &&
10321 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10322 			goto skip_modeset;
10323 
10324 		if (dm_new_crtc_state->stream &&
10325 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10326 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10327 			new_crtc_state->mode_changed = false;
10328 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10329 					 new_crtc_state->mode_changed);
10330 		}
10331 	}
10332 
10333 	/* mode_changed flag may get updated above, need to check again */
10334 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10335 		goto skip_modeset;
10336 
10337 	drm_dbg_state(state->dev,
10338 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10339 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10340 		"connectors_changed:%d\n",
10341 		acrtc->crtc_id,
10342 		new_crtc_state->enable,
10343 		new_crtc_state->active,
10344 		new_crtc_state->planes_changed,
10345 		new_crtc_state->mode_changed,
10346 		new_crtc_state->active_changed,
10347 		new_crtc_state->connectors_changed);
10348 
10349 	/* Remove stream for any changed/disabled CRTC */
10350 	if (!enable) {
10351 
10352 		if (!dm_old_crtc_state->stream)
10353 			goto skip_modeset;
10354 
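		/*
		 * A timing change confined to the vertical front porch can be
		 * absorbed as a freesync fixed-rate update instead of forcing
		 * a full modeset.
		 */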
10355 		if (dm_new_crtc_state->stream &&
10356 		    is_timing_unchanged_for_freesync(new_crtc_state,
10357 						     old_crtc_state)) {
10358 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER(
				"Mode change not required for front porch change, "
				"setting mode_changed to %d\n",
				new_crtc_state->mode_changed);
10363 
10364 			set_freesync_fixed_config(dm_new_crtc_state);
10365 
10366 			goto skip_modeset;
10367 		} else if (aconnector &&
10368 			   is_freesync_video_mode(&new_crtc_state->mode,
10369 						  aconnector)) {
10370 			struct drm_display_mode *high_mode;
10371 
10372 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10373 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10374 				set_freesync_fixed_config(dm_new_crtc_state);
10375 			}
10376 		}
10377 
10378 		ret = dm_atomic_get_state(state, &dm_state);
10379 		if (ret)
10380 			goto fail;
10381 
10382 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10383 				crtc->base.id);
10384 
10385 		/* i.e. reset mode */
10386 		if (dc_remove_stream_from_ctx(
10387 				dm->dc,
10388 				dm_state->context,
10389 				dm_old_crtc_state->stream) != DC_OK) {
10390 			ret = -EINVAL;
10391 			goto fail;
10392 		}
10393 
10394 		dc_stream_release(dm_old_crtc_state->stream);
10395 		dm_new_crtc_state->stream = NULL;
10396 
10397 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10398 
10399 		*lock_and_validation_needed = true;
10400 
10401 	} else {/* Add stream for any updated/enabled CRTC */
10402 		/*
10403 		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors are not found in the existing crtc_state
		 * when daisy-chained
10405 		 * TODO: need to dig out the root cause of that
10406 		 */
10407 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10408 			goto skip_modeset;
10409 
10410 		if (modereset_required(new_crtc_state))
10411 			goto skip_modeset;
10412 
10413 		if (modeset_required(new_crtc_state, new_stream,
10414 				     dm_old_crtc_state->stream)) {
10415 
10416 			WARN_ON(dm_new_crtc_state->stream);
10417 
10418 			ret = dm_atomic_get_state(state, &dm_state);
10419 			if (ret)
10420 				goto fail;
10421 
10422 			dm_new_crtc_state->stream = new_stream;
10423 
10424 			dc_stream_retain(new_stream);
10425 
10426 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10427 					 crtc->base.id);
10428 
10429 			if (dc_add_stream_to_ctx(
10430 					dm->dc,
10431 					dm_state->context,
10432 					dm_new_crtc_state->stream) != DC_OK) {
10433 				ret = -EINVAL;
10434 				goto fail;
10435 			}
10436 
10437 			*lock_and_validation_needed = true;
10438 		}
10439 	}
10440 
10441 skip_modeset:
10442 	/* Release extra reference */
10443 	if (new_stream)
		dc_stream_release(new_stream);
10445 
10446 	/*
10447 	 * We want to do dc stream updates that do not require a
10448 	 * full modeset below.
10449 	 */
10450 	if (!(enable && aconnector && new_crtc_state->active))
10451 		return 0;
10452 	/*
10453 	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream has just been
	 *    added to the dc context, or is already in the context)
10456 	 * 2. Has a valid connector attached, and
10457 	 * 3. Is currently active and enabled.
10458 	 * => The dc stream state currently exists.
10459 	 */
10460 	BUG_ON(dm_new_crtc_state->stream == NULL);
10461 
10462 	/* Scaling or underscan settings */
10463 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10464 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10465 		update_stream_scaling_settings(
10466 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10467 
10468 	/* ABM settings */
10469 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10470 
10471 	/*
10472 	 * Color management settings. We also update color properties
10473 	 * when a modeset is needed, to ensure it gets reprogrammed.
10474 	 */
10475 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10476 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10477 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10478 		if (ret)
10479 			goto fail;
10480 	}
10481 
10482 	/* Update Freesync settings. */
10483 	get_freesync_config_for_crtc(dm_new_crtc_state,
10484 				     dm_new_conn_state);
10485 
10486 	return ret;
10487 
10488 fail:
10489 	if (new_stream)
10490 		dc_stream_release(new_stream);
10491 	return ret;
10492 }
10493 
10494 static bool should_reset_plane(struct drm_atomic_state *state,
10495 			       struct drm_plane *plane,
10496 			       struct drm_plane_state *old_plane_state,
10497 			       struct drm_plane_state *new_plane_state)
10498 {
10499 	struct drm_plane *other;
10500 	struct drm_plane_state *old_other_state, *new_other_state;
10501 	struct drm_crtc_state *new_crtc_state;
10502 	int i;
10503 
10504 	/*
10505 	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
10507 	 * the stream.
10508 	 */
10509 	if (state->allow_modeset)
10510 		return true;
10511 
10512 	/* Exit early if we know that we're adding or removing the plane. */
10513 	if (old_plane_state->crtc != new_plane_state->crtc)
10514 		return true;
10515 
10516 	/* old crtc == new_crtc == NULL, plane not in context. */
10517 	if (!new_plane_state->crtc)
10518 		return false;
10519 
10520 	new_crtc_state =
10521 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10522 
10523 	if (!new_crtc_state)
10524 		return true;
10525 
10526 	/* CRTC Degamma changes currently require us to recreate planes. */
10527 	if (new_crtc_state->color_mgmt_changed)
10528 		return true;
10529 
10530 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10531 		return true;
10532 
10533 	/*
10534 	 * If there are any new primary or overlay planes being added or
10535 	 * removed then the z-order can potentially change. To ensure
10536 	 * correct z-order and pipe acquisition the current DC architecture
10537 	 * requires us to remove and recreate all existing planes.
10538 	 *
10539 	 * TODO: Come up with a more elegant solution for this.
10540 	 */
10541 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10542 		struct amdgpu_framebuffer *old_afb, *new_afb;
10543 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10544 			continue;
10545 
10546 		if (old_other_state->crtc != new_plane_state->crtc &&
10547 		    new_other_state->crtc != new_plane_state->crtc)
10548 			continue;
10549 
10550 		if (old_other_state->crtc != new_other_state->crtc)
10551 			return true;
10552 
10553 		/* Src/dst size and scaling updates. */
10554 		if (old_other_state->src_w != new_other_state->src_w ||
10555 		    old_other_state->src_h != new_other_state->src_h ||
10556 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10557 		    old_other_state->crtc_h != new_other_state->crtc_h)
10558 			return true;
10559 
10560 		/* Rotation / mirroring updates. */
10561 		if (old_other_state->rotation != new_other_state->rotation)
10562 			return true;
10563 
10564 		/* Blending updates. */
10565 		if (old_other_state->pixel_blend_mode !=
10566 		    new_other_state->pixel_blend_mode)
10567 			return true;
10568 
10569 		/* Alpha updates. */
10570 		if (old_other_state->alpha != new_other_state->alpha)
10571 			return true;
10572 
10573 		/* Colorspace changes. */
10574 		if (old_other_state->color_range != new_other_state->color_range ||
10575 		    old_other_state->color_encoding != new_other_state->color_encoding)
10576 			return true;
10577 
10578 		/* Framebuffer checks fall at the end. */
10579 		if (!old_other_state->fb || !new_other_state->fb)
10580 			continue;
10581 
10582 		/* Pixel format changes can require bandwidth updates. */
10583 		if (old_other_state->fb->format != new_other_state->fb->format)
10584 			return true;
10585 
10586 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10587 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10588 
10589 		/* Tiling and DCC changes also require bandwidth updates. */
10590 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10591 		    old_afb->base.modifier != new_afb->base.modifier)
10592 			return true;
10593 	}
10594 
10595 	return false;
10596 }
10597 
10598 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10599 			      struct drm_plane_state *new_plane_state,
10600 			      struct drm_framebuffer *fb)
10601 {
10602 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10603 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10604 	unsigned int pitch;
10605 	bool linear;
10606 
10607 	if (fb->width > new_acrtc->max_cursor_width ||
10608 	    fb->height > new_acrtc->max_cursor_height) {
10609 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10610 				 new_plane_state->fb->width,
10611 				 new_plane_state->fb->height);
10612 		return -EINVAL;
10613 	}
10614 	if (new_plane_state->src_w != fb->width << 16 ||
10615 	    new_plane_state->src_h != fb->height << 16) {
10616 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10617 		return -EINVAL;
10618 	}
10619 
10620 	/* Pitch in pixels */
10621 	pitch = fb->pitches[0] / fb->format->cpp[0];
10622 
10623 	if (fb->width != pitch) {
10624 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10625 				 fb->width, pitch);
10626 		return -EINVAL;
10627 	}
10628 
10629 	switch (pitch) {
10630 	case 64:
10631 	case 128:
10632 	case 256:
10633 		/* FB pitch is supported by cursor plane */
10634 		break;
10635 	default:
10636 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10637 		return -EINVAL;
10638 	}
10639 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10642 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10643 		if (adev->family < AMDGPU_FAMILY_AI) {
10644 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10645 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10646 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10647 		} else {
10648 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10649 		}
10650 		if (!linear) {
10651 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10652 			return -EINVAL;
10653 		}
10654 	}
10655 
10656 	return 0;
10657 }
10658 
10659 static int dm_update_plane_state(struct dc *dc,
10660 				 struct drm_atomic_state *state,
10661 				 struct drm_plane *plane,
10662 				 struct drm_plane_state *old_plane_state,
10663 				 struct drm_plane_state *new_plane_state,
10664 				 bool enable,
10665 				 bool *lock_and_validation_needed)
10666 {
10667 
10668 	struct dm_atomic_state *dm_state = NULL;
10669 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10670 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10671 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10672 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10673 	struct amdgpu_crtc *new_acrtc;
10674 	bool needs_reset;
10675 	int ret = 0;
10676 
10677 
10678 	new_plane_crtc = new_plane_state->crtc;
10679 	old_plane_crtc = old_plane_state->crtc;
10680 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10681 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10682 
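	/*
	 * Cursor planes are not backed by a dc_plane_state; DC programs the
	 * cursor as part of the stream, so only sanity-check the framebuffer
	 * constraints here.
	 */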
10683 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10684 		if (!enable || !new_plane_crtc ||
10685 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10686 			return 0;
10687 
10688 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10689 
10690 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10691 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10692 			return -EINVAL;
10693 		}
10694 
10695 		if (new_plane_state->fb) {
10696 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10697 						 new_plane_state->fb);
10698 			if (ret)
10699 				return ret;
10700 		}
10701 
10702 		return 0;
10703 	}
10704 
10705 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10706 					 new_plane_state);
10707 
10708 	/* Remove any changed/removed planes */
10709 	if (!enable) {
10710 		if (!needs_reset)
10711 			return 0;
10712 
10713 		if (!old_plane_crtc)
10714 			return 0;
10715 
10716 		old_crtc_state = drm_atomic_get_old_crtc_state(
10717 				state, old_plane_crtc);
10718 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10719 
10720 		if (!dm_old_crtc_state->stream)
10721 			return 0;
10722 
10723 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10724 				plane->base.id, old_plane_crtc->base.id);
10725 
10726 		ret = dm_atomic_get_state(state, &dm_state);
10727 		if (ret)
10728 			return ret;
10729 
10730 		if (!dc_remove_plane_from_context(
10731 				dc,
10732 				dm_old_crtc_state->stream,
10733 				dm_old_plane_state->dc_state,
10734 				dm_state->context)) {
10735 
10736 			return -EINVAL;
10737 		}
10738 
10739 
10740 		dc_plane_state_release(dm_old_plane_state->dc_state);
10741 		dm_new_plane_state->dc_state = NULL;
10742 
10743 		*lock_and_validation_needed = true;
10744 
10745 	} else { /* Add new planes */
10746 		struct dc_plane_state *dc_new_plane_state;
10747 
10748 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10749 			return 0;
10750 
10751 		if (!new_plane_crtc)
10752 			return 0;
10753 
10754 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10755 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10756 
10757 		if (!dm_new_crtc_state->stream)
10758 			return 0;
10759 
10760 		if (!needs_reset)
10761 			return 0;
10762 
10763 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10764 		if (ret)
10765 			return ret;
10766 
10767 		WARN_ON(dm_new_plane_state->dc_state);
10768 
10769 		dc_new_plane_state = dc_create_plane_state(dc);
10770 		if (!dc_new_plane_state)
10771 			return -ENOMEM;
10772 
10773 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10774 				 plane->base.id, new_plane_crtc->base.id);
10775 
10776 		ret = fill_dc_plane_attributes(
10777 			drm_to_adev(new_plane_crtc->dev),
10778 			dc_new_plane_state,
10779 			new_plane_state,
10780 			new_crtc_state);
10781 		if (ret) {
10782 			dc_plane_state_release(dc_new_plane_state);
10783 			return ret;
10784 		}
10785 
10786 		ret = dm_atomic_get_state(state, &dm_state);
10787 		if (ret) {
10788 			dc_plane_state_release(dc_new_plane_state);
10789 			return ret;
10790 		}
10791 
10792 		/*
10793 		 * Any atomic check errors that occur after this will
10794 		 * not need a release. The plane state will be attached
10795 		 * to the stream, and therefore part of the atomic
10796 		 * state. It'll be released when the atomic state is
10797 		 * cleaned.
10798 		 */
10799 		if (!dc_add_plane_to_context(
10800 				dc,
10801 				dm_new_crtc_state->stream,
10802 				dc_new_plane_state,
10803 				dm_state->context)) {
10804 
10805 			dc_plane_state_release(dc_new_plane_state);
10806 			return -EINVAL;
10807 		}
10808 
10809 		dm_new_plane_state->dc_state = dc_new_plane_state;
10810 
10811 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10812 
10813 		/* Tell DC to do a full surface update every time there
10814 		 * is a plane change. Inefficient, but works for now.
10815 		 */
10816 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10817 
10818 		*lock_and_validation_needed = true;
10819 	}
10820 
10821 
10822 	return ret;
10823 }
10824 
10825 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10826 				       int *src_w, int *src_h)
10827 {
10828 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10829 	case DRM_MODE_ROTATE_90:
10830 	case DRM_MODE_ROTATE_270:
10831 		*src_w = plane_state->src_h >> 16;
10832 		*src_h = plane_state->src_w >> 16;
10833 		break;
10834 	case DRM_MODE_ROTATE_0:
10835 	case DRM_MODE_ROTATE_180:
10836 	default:
10837 		*src_w = plane_state->src_w >> 16;
10838 		*src_h = plane_state->src_h >> 16;
10839 		break;
10840 	}
10841 }
10842 
10843 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10844 				struct drm_crtc *crtc,
10845 				struct drm_crtc_state *new_crtc_state)
10846 {
10847 	struct drm_plane *cursor = crtc->cursor, *underlying;
10848 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10849 	int i;
10850 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10851 	int cursor_src_w, cursor_src_h;
10852 	int underlying_src_w, underlying_src_h;
10853 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the underlying planes'.
	 */
10858 
10859 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;
10863 
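	/* Scale factors are computed in thousandths to avoid fractional math. */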
10864 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10865 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10866 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10867 
10868 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10869 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10870 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10871 			continue;
10872 
10873 		/* Ignore disabled planes */
10874 		if (!new_underlying_state->fb)
10875 			continue;
10876 
10877 		dm_get_oriented_plane_size(new_underlying_state,
10878 					   &underlying_src_w, &underlying_src_h);
10879 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10880 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10881 
10882 		if (cursor_scale_w != underlying_scale_w ||
10883 		    cursor_scale_h != underlying_scale_h) {
10884 			drm_dbg_atomic(crtc->dev,
10885 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10886 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10887 			return -EINVAL;
10888 		}
10889 
10890 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10891 		if (new_underlying_state->crtc_x <= 0 &&
10892 		    new_underlying_state->crtc_y <= 0 &&
10893 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10894 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10895 			break;
10896 	}
10897 
10898 	return 0;
10899 }
10900 
10901 #if defined(CONFIG_DRM_AMD_DC_DCN)
10902 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10903 {
10904 	struct drm_connector *connector;
10905 	struct drm_connector_state *conn_state, *old_conn_state;
10906 	struct amdgpu_dm_connector *aconnector = NULL;
10907 	int i;
10908 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10909 		if (!conn_state->crtc)
10910 			conn_state = old_conn_state;
10911 
10912 		if (conn_state->crtc != crtc)
10913 			continue;
10914 
10915 		aconnector = to_amdgpu_dm_connector(connector);
10916 		if (!aconnector->port || !aconnector->mst_port)
10917 			aconnector = NULL;
10918 		else
10919 			break;
10920 	}
10921 
10922 	if (!aconnector)
10923 		return 0;
10924 
10925 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10926 }
10927 #endif
10928 
10929 /**
10930  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10931  * @dev: The DRM device
10932  * @state: The atomic state to commit
10933  *
10934  * Validate that the given atomic state is programmable by DC into hardware.
10935  * This involves constructing a &struct dc_state reflecting the new hardware
10936  * state we wish to commit, then querying DC to see if it is programmable. It's
10937  * important not to modify the existing DC state. Otherwise, atomic_check
10938  * may unexpectedly commit hardware changes.
10939  *
10940  * When validating the DC state, it's important that the right locks are
10941  * acquired. For full updates case which removes/adds/updates streams on one
10942  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10943  * that any such full update commit will wait for completion of any outstanding
10944  * flip using DRMs synchronization events.
10945  *
10946  * Note that DM adds the affected connectors for all CRTCs in state, when that
10947  * might not seem necessary. This is because DC stream creation requires the
10948  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10949  * be possible but non-trivial - a possible TODO item.
10950  *
 * Return: 0 on success, or a negative error code if validation failed.
10952  */
10953 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10954 				  struct drm_atomic_state *state)
10955 {
10956 	struct amdgpu_device *adev = drm_to_adev(dev);
10957 	struct dm_atomic_state *dm_state = NULL;
10958 	struct dc *dc = adev->dm.dc;
10959 	struct drm_connector *connector;
10960 	struct drm_connector_state *old_con_state, *new_con_state;
10961 	struct drm_crtc *crtc;
10962 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10963 	struct drm_plane *plane;
10964 	struct drm_plane_state *old_plane_state, *new_plane_state;
10965 	enum dc_status status;
10966 	int ret, i;
10967 	bool lock_and_validation_needed = false;
10968 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10969 #if defined(CONFIG_DRM_AMD_DC_DCN)
10970 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10971 	struct drm_dp_mst_topology_state *mst_state;
10972 	struct drm_dp_mst_topology_mgr *mgr;
10973 #endif
10974 
10975 	trace_amdgpu_dm_atomic_check_begin(state);
10976 
10977 	ret = drm_atomic_helper_check_modeset(dev, state);
10978 	if (ret) {
10979 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10980 		goto fail;
10981 	}
10982 
10983 	/* Check connector changes */
10984 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10985 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10986 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10987 
10988 		/* Skip connectors that are disabled or part of modeset already. */
10989 		if (!old_con_state->crtc && !new_con_state->crtc)
10990 			continue;
10991 
10992 		if (!new_con_state->crtc)
10993 			continue;
10994 
10995 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10996 		if (IS_ERR(new_crtc_state)) {
10997 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10998 			ret = PTR_ERR(new_crtc_state);
10999 			goto fail;
11000 		}
11001 
11002 		if (dm_old_con_state->abm_level !=
11003 		    dm_new_con_state->abm_level)
11004 			new_crtc_state->connectors_changed = true;
11005 	}
11006 
11007 #if defined(CONFIG_DRM_AMD_DC_DCN)
11008 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11009 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11010 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11011 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11012 				if (ret) {
11013 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11014 					goto fail;
11015 				}
11016 			}
11017 		}
11018 		pre_validate_dsc(state, &dm_state, vars);
11019 	}
11020 #endif
11021 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11022 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11023 
11024 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11025 		    !new_crtc_state->color_mgmt_changed &&
11026 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11027 			dm_old_crtc_state->dsc_force_changed == false)
11028 			continue;
11029 
11030 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11031 		if (ret) {
11032 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11033 			goto fail;
11034 		}
11035 
11036 		if (!new_crtc_state->enable)
11037 			continue;
11038 
11039 		ret = drm_atomic_add_affected_connectors(state, crtc);
11040 		if (ret) {
11041 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11042 			goto fail;
11043 		}
11044 
11045 		ret = drm_atomic_add_affected_planes(state, crtc);
11046 		if (ret) {
11047 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11048 			goto fail;
11049 		}
11050 
11051 		if (dm_old_crtc_state->dsc_force_changed)
11052 			new_crtc_state->mode_changed = true;
11053 	}
11054 
11055 	/*
11056 	 * Add all primary and overlay planes on the CRTC to the state
11057 	 * whenever a plane is enabled to maintain correct z-ordering
11058 	 * and to enable fast surface updates.
11059 	 */
11060 	drm_for_each_crtc(crtc, dev) {
11061 		bool modified = false;
11062 
11063 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11064 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11065 				continue;
11066 
11067 			if (new_plane_state->crtc == crtc ||
11068 			    old_plane_state->crtc == crtc) {
11069 				modified = true;
11070 				break;
11071 			}
11072 		}
11073 
11074 		if (!modified)
11075 			continue;
11076 
11077 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11078 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11079 				continue;
11080 
11081 			new_plane_state =
11082 				drm_atomic_get_plane_state(state, plane);
11083 
11084 			if (IS_ERR(new_plane_state)) {
11085 				ret = PTR_ERR(new_plane_state);
11086 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11087 				goto fail;
11088 			}
11089 		}
11090 	}
11091 
	/* Remove existing planes if they are modified */
11093 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11094 		ret = dm_update_plane_state(dc, state, plane,
11095 					    old_plane_state,
11096 					    new_plane_state,
11097 					    false,
11098 					    &lock_and_validation_needed);
11099 		if (ret) {
11100 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11101 			goto fail;
11102 		}
11103 	}
11104 
11105 	/* Disable all crtcs which require disable */
11106 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11107 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11108 					   old_crtc_state,
11109 					   new_crtc_state,
11110 					   false,
11111 					   &lock_and_validation_needed);
11112 		if (ret) {
11113 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11114 			goto fail;
11115 		}
11116 	}
11117 
11118 	/* Enable all crtcs which require enable */
11119 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11120 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11121 					   old_crtc_state,
11122 					   new_crtc_state,
11123 					   true,
11124 					   &lock_and_validation_needed);
11125 		if (ret) {
11126 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11127 			goto fail;
11128 		}
11129 	}
11130 
11131 	/* Add new/modified planes */
11132 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11133 		ret = dm_update_plane_state(dc, state, plane,
11134 					    old_plane_state,
11135 					    new_plane_state,
11136 					    true,
11137 					    &lock_and_validation_needed);
11138 		if (ret) {
11139 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11140 			goto fail;
11141 		}
11142 	}
11143 
11144 	/* Run this here since we want to validate the streams we created */
11145 	ret = drm_atomic_helper_check_planes(dev, state);
11146 	if (ret) {
11147 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11148 		goto fail;
11149 	}
11150 
11151 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11152 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11153 		if (dm_new_crtc_state->mpo_requested)
11154 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11155 	}
11156 
11157 	/* Check cursor planes scaling */
11158 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11159 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11160 		if (ret) {
11161 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11162 			goto fail;
11163 		}
11164 	}
11165 
11166 	if (state->legacy_cursor_update) {
11167 		/*
11168 		 * This is a fast cursor update coming from the plane update
11169 		 * helper, check if it can be done asynchronously for better
11170 		 * performance.
11171 		 */
11172 		state->async_update =
11173 			!drm_atomic_helper_async_check(dev, state);
11174 
11175 		/*
11176 		 * Skip the remaining global validation if this is an async
11177 		 * update. Cursor updates can be done without affecting
11178 		 * state or bandwidth calcs and this avoids the performance
11179 		 * penalty of locking the private state object and
11180 		 * allocating a new dc_state.
11181 		 */
11182 		if (state->async_update)
11183 			return 0;
11184 	}
11185 
	/* Check scaling and underscan changes */
	/* TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
11191 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11192 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11193 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11194 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11195 
11196 		/* Skip any modesets/resets */
11197 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11198 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11199 			continue;
11200 
		/* Skip anything that is not a scaling or underscan change */
11202 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11203 			continue;
11204 
11205 		lock_and_validation_needed = true;
11206 	}
11207 
11208 #if defined(CONFIG_DRM_AMD_DC_DCN)
11209 	/* set the slot info for each mst_state based on the link encoding format */
11210 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11211 		struct amdgpu_dm_connector *aconnector;
11212 		struct drm_connector *connector;
11213 		struct drm_connector_list_iter iter;
11214 		u8 link_coding_cap;
11215 
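		/*
		 * Whether the link uses 8b/10b or 128b/132b channel coding
		 * determines how many timeslots per MTP the MST manager may
		 * hand out, so report it per topology state.
		 */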
		if (!mgr->mst_state)
11217 			continue;
11218 
11219 		drm_connector_list_iter_begin(dev, &iter);
11220 		drm_for_each_connector_iter(connector, &iter) {
11221 			int id = connector->index;
11222 
11223 			if (id == mst_state->mgr->conn_base_id) {
11224 				aconnector = to_amdgpu_dm_connector(connector);
11225 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11226 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11227 
11228 				break;
11229 			}
11230 		}
		drm_connector_list_iter_end(&iter);
	}
11234 #endif
11235 	/**
11236 	 * Streams and planes are reset when there are changes that affect
11237 	 * bandwidth. Anything that affects bandwidth needs to go through
11238 	 * DC global validation to ensure that the configuration can be applied
11239 	 * to hardware.
11240 	 *
11241 	 * We have to currently stall out here in atomic_check for outstanding
11242 	 * commits to finish in this case because our IRQ handlers reference
11243 	 * DRM state directly - we can end up disabling interrupts too early
11244 	 * if we don't.
11245 	 *
11246 	 * TODO: Remove this stall and drop DM state private objects.
11247 	 */
11248 	if (lock_and_validation_needed) {
11249 		ret = dm_atomic_get_state(state, &dm_state);
11250 		if (ret) {
11251 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11252 			goto fail;
11253 		}
11254 
11255 		ret = do_aquire_global_lock(dev, state);
11256 		if (ret) {
11257 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11258 			goto fail;
11259 		}
11260 
11261 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}
11266 
11267 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11268 		if (ret) {
11269 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11270 			goto fail;
11271 		}
11272 #endif
11273 
11274 		/*
11275 		 * Perform validation of MST topology in the state:
11276 		 * We need to perform MST atomic check before calling
11277 		 * dc_validate_global_state(), or there is a chance
11278 		 * to get stuck in an infinite loop and hang eventually.
11279 		 */
11280 		ret = drm_dp_mst_atomic_check(state);
11281 		if (ret) {
11282 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11283 			goto fail;
11284 		}
11285 		status = dc_validate_global_state(dc, dm_state->context, true);
11286 		if (status != DC_OK) {
11287 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11288 				       dc_status_to_str(status), status);
11289 			ret = -EINVAL;
11290 			goto fail;
11291 		}
11292 	} else {
11293 		/*
11294 		 * The commit is a fast update. Fast updates shouldn't change
11295 		 * the DC context, affect global validation, and can have their
11296 		 * commit work done in parallel with other commits not touching
11297 		 * the same resource. If we have a new DC context as part of
11298 		 * the DM atomic state from validation we need to free it and
11299 		 * retain the existing one instead.
11300 		 *
11301 		 * Furthermore, since the DM atomic state only contains the DC
11302 		 * context and can safely be annulled, we can free the state
11303 		 * and clear the associated private object now to free
11304 		 * some memory and avoid a possible use-after-free later.
11305 		 */
11306 
11307 		for (i = 0; i < state->num_private_objs; i++) {
11308 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11309 
11310 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11311 				int j = state->num_private_objs-1;
11312 
11313 				dm_atomic_destroy_state(obj,
11314 						state->private_objs[i].state);
11315 
11316 				/* If i is not at the end of the array then the
11317 				 * last element needs to be moved to where i was
11318 				 * before the array can safely be truncated.
11319 				 */
11320 				if (i != j)
11321 					state->private_objs[i] =
11322 						state->private_objs[j];
11323 
11324 				state->private_objs[j].ptr = NULL;
11325 				state->private_objs[j].state = NULL;
11326 				state->private_objs[j].old_state = NULL;
11327 				state->private_objs[j].new_state = NULL;
11328 
11329 				state->num_private_objs = j;
11330 				break;
11331 			}
11332 		}
11333 	}
11334 
11335 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11337 		struct dm_crtc_state *dm_new_crtc_state =
11338 			to_dm_crtc_state(new_crtc_state);
11339 
11340 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11341 							 UPDATE_TYPE_FULL :
11342 							 UPDATE_TYPE_FAST;
11343 	}
11344 
11345 	/* Must be success */
11346 	WARN_ON(ret);
11347 
11348 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11349 
11350 	return ret;
11351 
11352 fail:
11353 	if (ret == -EDEADLK)
11354 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11355 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11356 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11357 	else
11358 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11359 
11360 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11361 
11362 	return ret;
11363 }
11364 
11365 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11366 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11367 {
11368 	uint8_t dpcd_data;
11369 	bool capable = false;
11370 
11371 	if (amdgpu_dm_connector->dc_link &&
11372 		dm_helpers_dp_read_dpcd(
11373 				NULL,
11374 				amdgpu_dm_connector->dc_link,
11375 				DP_DOWN_STREAM_PORT_COUNT,
11376 				&dpcd_data,
11377 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11379 	}
11380 
11381 	return capable;
11382 }
11383 
11384 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11385 		unsigned int offset,
11386 		unsigned int total_length,
11387 		uint8_t *data,
11388 		unsigned int length,
11389 		struct amdgpu_hdmi_vsdb_info *vsdb)
11390 {
11391 	bool res;
11392 	union dmub_rb_cmd cmd;
11393 	struct dmub_cmd_send_edid_cea *input;
11394 	struct dmub_cmd_edid_cea_output *output;
11395 
11396 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11397 		return false;
11398 
11399 	memset(&cmd, 0, sizeof(cmd));
11400 
11401 	input = &cmd.edid_cea.data.input;
11402 
11403 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11404 	cmd.edid_cea.header.sub_type = 0;
11405 	cmd.edid_cea.header.payload_bytes =
11406 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11407 	input->offset = offset;
11408 	input->length = length;
11409 	input->cea_total_length = total_length;
11410 	memcpy(input->payload, data, length);
11411 
11412 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11413 	if (!res) {
11414 		DRM_ERROR("EDID CEA parser failed\n");
11415 		return false;
11416 	}
11417 
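	/*
	 * The reply carries either an ACK for an intermediate chunk or, after
	 * the final chunk, the parsed AMD VSDB contents.
	 */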
11418 	output = &cmd.edid_cea.data.output;
11419 
11420 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11421 		if (!output->ack.success) {
11422 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11423 					output->ack.offset);
11424 		}
11425 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11426 		if (!output->amd_vsdb.vsdb_found)
11427 			return false;
11428 
11429 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11430 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11431 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11432 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11433 	} else {
11434 		DRM_WARN("Unknown EDID CEA parser results\n");
11435 		return false;
11436 	}
11437 
11438 	return true;
11439 }
11440 
11441 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11442 		uint8_t *edid_ext, int len,
11443 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11444 {
11445 	int i;
11446 
11447 	/* send extension block to DMCU for parsing */
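	/*
	 * The extension block is consumed 8 bytes at a time; every
	 * intermediate chunk is acknowledged, and only the final chunk yields
	 * the AMD VSDB parse result.
	 */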
11448 	for (i = 0; i < len; i += 8) {
11449 		bool res;
11450 		int offset;
11451 
11452 		/* send 8 bytes a time */
11453 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11454 			return false;
11455 
		if (i + 8 == len) {
			/* EDID block fully sent; expect the parse result */
11458 			int version, min_rate, max_rate;
11459 
11460 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11461 			if (res) {
11462 				/* amd vsdb found */
11463 				vsdb_info->freesync_supported = 1;
11464 				vsdb_info->amd_vsdb_version = version;
11465 				vsdb_info->min_refresh_rate_hz = min_rate;
11466 				vsdb_info->max_refresh_rate_hz = max_rate;
11467 				return true;
11468 			}
11469 			/* not amd vsdb */
11470 			return false;
11471 		}
11472 
		/* check for ack */
11474 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11475 		if (!res)
11476 			return false;
11477 	}
11478 
11479 	return false;
11480 }
11481 
11482 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11483 		uint8_t *edid_ext, int len,
11484 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11485 {
11486 	int i;
11487 
	/* send extension block to DMUB for parsing */
11489 	for (i = 0; i < len; i += 8) {
11490 		/* send 8 bytes a time */
11491 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11492 			return false;
11493 	}
11494 
11495 	return vsdb_info->freesync_supported;
11496 }
11497 
11498 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11499 		uint8_t *edid_ext, int len,
11500 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11501 {
11502 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11503 
11504 	if (adev->dm.dmub_srv)
11505 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11506 	else
11507 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11508 }
11509 
11510 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11511 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11512 {
11513 	uint8_t *edid_ext = NULL;
11514 	int i;
11515 	bool valid_vsdb_found = false;
11516 
11517 	/*----- drm_find_cea_extension() -----*/
11518 	/* No EDID or EDID extensions */
11519 	if (edid == NULL || edid->extensions == 0)
11520 		return -ENODEV;
11521 
11522 	/* Find CEA extension */
11523 	for (i = 0; i < edid->extensions; i++) {
11524 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11525 		if (edid_ext[0] == CEA_EXT)
11526 			break;
11527 	}
11528 
11529 	if (i == edid->extensions)
11530 		return -ENODEV;
11531 
11532 	/*----- cea_db_offsets() -----*/
11533 	if (edid_ext[0] != CEA_EXT)
11534 		return -ENODEV;
11535 
11536 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11537 
11538 	return valid_vsdb_found ? i : -ENODEV;
11539 }
11540 
11541 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11542 					struct edid *edid)
11543 {
11544 	int i = 0;
11545 	struct detailed_timing *timing;
11546 	struct detailed_non_pixel *data;
11547 	struct detailed_data_monitor_range *range;
11548 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11549 			to_amdgpu_dm_connector(connector);
11550 	struct dm_connector_state *dm_con_state = NULL;
11551 	struct dc_sink *sink;
11552 
11553 	struct drm_device *dev = connector->dev;
11554 	struct amdgpu_device *adev = drm_to_adev(dev);
11555 	bool freesync_capable = false;
11556 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11557 
11558 	if (!connector->state) {
11559 		DRM_ERROR("%s - Connector has no state", __func__);
11560 		goto update;
11561 	}
11562 
11563 	sink = amdgpu_dm_connector->dc_sink ?
11564 		amdgpu_dm_connector->dc_sink :
11565 		amdgpu_dm_connector->dc_em_sink;
11566 
11567 	if (!edid || !sink) {
11568 		dm_con_state = to_dm_connector_state(connector->state);
11569 
11570 		amdgpu_dm_connector->min_vfreq = 0;
11571 		amdgpu_dm_connector->max_vfreq = 0;
11572 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11573 		connector->display_info.monitor_range.min_vfreq = 0;
11574 		connector->display_info.monitor_range.max_vfreq = 0;
11575 		freesync_capable = false;
11576 
11577 		goto update;
11578 	}
11579 
11580 	dm_con_state = to_dm_connector_state(connector->state);
11581 
11582 	if (!adev->dm.freesync_module)
		goto update;

11586 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11587 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11588 		bool edid_check_required = false;
11589 
11590 		if (edid) {
11591 			edid_check_required = is_dp_capable_without_timing_msa(
11592 						adev->dm.dc,
11593 						amdgpu_dm_connector);
11594 		}
11595 
11596 		if (edid_check_required == true && (edid->version > 1 ||
11597 		   (edid->version == 1 && edid->revision > 1))) {
11598 			for (i = 0; i < 4; i++) {
11599 
11600 				timing	= &edid->detailed_timings[i];
11601 				data	= &timing->data.other_data;
11602 				range	= &data->data.range;
11603 				/*
11604 				 * Check if monitor has continuous frequency mode
11605 				 */
11606 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11607 					continue;
11608 				/*
11609 				 * Check for flag range limits only. If flag == 1 then
11610 				 * no additional timing information provided.
11611 				 * Default GTF, GTF Secondary curve and CVT are not
11612 				 * supported
11613 				 */
11614 				if (range->flags != 1)
11615 					continue;
11616 
11617 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11618 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11619 				amdgpu_dm_connector->pixel_clock_mhz =
11620 					range->pixel_clock_mhz * 10;
11621 
11622 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11623 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11624 
11625 				break;
11626 			}
11627 
11628 			if (amdgpu_dm_connector->max_vfreq -
11629 			    amdgpu_dm_connector->min_vfreq > 10) {
11630 
11631 				freesync_capable = true;
11632 			}
11633 		}
11634 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11635 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11636 		if (i >= 0 && vsdb_info.freesync_supported) {
11637 			timing  = &edid->detailed_timings[i];
11638 			data    = &timing->data.other_data;
11639 
11640 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11641 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11642 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11643 				freesync_capable = true;
11644 
11645 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11646 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11647 		}
11648 	}
11649 
11650 update:
11651 	if (dm_con_state)
11652 		dm_con_state->freesync_capable = freesync_capable;
11653 
11654 	if (connector->vrr_capable_property)
11655 		drm_connector_set_vrr_capable_property(connector,
11656 						       freesync_capable);
11657 }
11658 
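/*
 * Propagate the force_timing_sync setting to every stream in the current DC
 * state and retrigger cross-CRTC synchronization.
 */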
11659 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11660 {
11661 	struct amdgpu_device *adev = drm_to_adev(dev);
11662 	struct dc *dc = adev->dm.dc;
11663 	int i;
11664 
11665 	mutex_lock(&adev->dm.dc_lock);
11666 	if (dc->current_state) {
11667 		for (i = 0; i < dc->current_state->stream_count; ++i)
11668 			dc->current_state->streams[i]
11669 				->triggered_crtc_reset.enabled =
11670 				adev->dm.force_timing_sync;
11671 
11672 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11673 		dc_trigger_sync(dc, dc->current_state);
11674 	}
11675 	mutex_unlock(&adev->dm.dc_lock);
11676 }
11677 
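/* DC register write callback: performs the write through CGS and traces it. */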
11678 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11679 		       uint32_t value, const char *func_name)
11680 {
11681 #ifdef DM_CHECK_ADDR_0
11682 	if (address == 0) {
11683 		DC_ERR("invalid register write. address = 0");
11684 		return;
11685 	}
11686 #endif
11687 	cgs_write_register(ctx->cgs_device, address, value);
11688 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11689 }
11690 
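/*
 * DC register read callback: reads through CGS and traces the result. Reads
 * are rejected while a DMUB register offload gather (other than a burst
 * write) is in progress, since no value would be available yet.
 */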
11691 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11692 			  const char *func_name)
11693 {
11694 	uint32_t value;
11695 #ifdef DM_CHECK_ADDR_0
11696 	if (address == 0) {
11697 		DC_ERR("invalid register read; address = 0\n");
11698 		return 0;
11699 	}
11700 #endif
11701 
11702 	if (ctx->dmub_srv &&
11703 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11704 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11705 		ASSERT(false);
11706 		return 0;
11707 	}
11708 
11709 	value = cgs_read_register(ctx->cgs_device, address);
11710 
11711 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11712 
11713 	return value;
11714 }
11715 
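/*
 * Translate a DMUB async-to-sync completion status into the value returned
 * to the caller and into an AUX or SET_CONFIG operation result code. A
 * successful AUX access returns the reply length, SET_CONFIG returns 0,
 * anything else returns -1.
 */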
11716 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11717 						struct dc_context *ctx,
11718 						uint8_t status_type,
11719 						uint32_t *operation_result)
11720 {
11721 	struct amdgpu_device *adev = ctx->driver_context;
11722 	int return_status = -1;
11723 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11724 
11725 	if (is_cmd_aux) {
11726 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11727 			return_status = p_notify->aux_reply.length;
11728 			*operation_result = p_notify->result;
11729 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11730 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11731 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11732 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11733 		} else {
11734 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11735 		}
11736 	} else {
11737 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11738 			return_status = 0;
11739 			*operation_result = p_notify->sc_status;
11740 		} else {
11741 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11742 		}
11743 	}
11744 
11745 	return return_status;
11746 }
11747 
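/*
 * Submit an AUX or SET_CONFIG request through DMUB and, unless the request
 * completed immediately, wait up to 10 seconds for the notification. For a
 * successful AUX read the reply data is copied back into the payload.
 */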
11748 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11749 	unsigned int link_index, void *cmd_payload, void *operation_result)
11750 {
11751 	struct amdgpu_device *adev = ctx->driver_context;
11752 	int ret = 0;
11753 
11754 	if (is_cmd_aux) {
11755 		dc_process_dmub_aux_transfer_async(ctx->dc,
11756 			link_index, (struct aux_payload *)cmd_payload);
11757 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11758 					(struct set_config_cmd_payload *)cmd_payload,
11759 					adev->dm.dmub_notify)) {
11760 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11761 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11762 					(uint32_t *)operation_result);
11763 	}
11764 
11765 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11766 	if (ret == 0) {
11767 		DRM_ERROR("wait_for_completion_timeout timeout!");
11768 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11769 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11770 				(uint32_t *)operation_result);
11771 	}
11772 
11773 	if (is_cmd_aux) {
11774 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11775 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11776 
11777 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11778 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11779 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11780 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11781 				       adev->dm.dmub_notify->aux_reply.length);
11782 			}
11783 		}
11784 	}
11785 
11786 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11787 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11788 			(uint32_t *)operation_result);
11789 }
11790 
11791 /*
11792  * Check whether seamless boot is supported.
11793  *
11794  * So far we only support seamless boot on CHIP_VANGOGH.
11795  * If everything goes well, we may consider expanding
11796  * seamless boot to other ASICs.
11797  */
11798 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11799 {
11800 	switch (adev->asic_type) {
11801 	case CHIP_VANGOGH:
11802 		if (!adev->mman.keep_stolen_vga_memory)
11803 			return true;
11804 		break;
11805 	default:
11806 		break;
11807 	}
11808 
11809 	return false;
11810 }
11811