xref: /openbmc/linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 869b6ca39c08c5b10eeb29d4b3c4bc433bf8ba5e)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85 
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 #endif
96 
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100 
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 
118 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
119 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120 
121 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123 
124 /* Number of bytes in PSP header for firmware. */
125 #define PSP_HEADER_BYTES 0x100
126 
127 /* Number of bytes in PSP footer for firmware. */
128 #define PSP_FOOTER_BYTES 0x100
129 
130 /**
131  * DOC: overview
132  *
133  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135  * requests into DC requests, and DC responses into DRM responses.
136  *
137  * The root control structure is &struct amdgpu_display_manager.
138  */
139 
140 /* basic init/fini API */
141 static int amdgpu_dm_init(struct amdgpu_device *adev);
142 static void amdgpu_dm_fini(struct amdgpu_device *adev);
143 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
144 
145 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146 {
147 	switch (link->dpcd_caps.dongle_type) {
148 	case DISPLAY_DONGLE_NONE:
149 		return DRM_MODE_SUBCONNECTOR_Native;
150 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
151 		return DRM_MODE_SUBCONNECTOR_VGA;
152 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
153 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
154 		return DRM_MODE_SUBCONNECTOR_DVID;
155 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
156 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
157 		return DRM_MODE_SUBCONNECTOR_HDMIA;
158 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159 	default:
160 		return DRM_MODE_SUBCONNECTOR_Unknown;
161 	}
162 }
163 
164 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165 {
166 	struct dc_link *link = aconnector->dc_link;
167 	struct drm_connector *connector = &aconnector->base;
168 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169 
170 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
171 		return;
172 
173 	if (aconnector->dc_sink)
174 		subconnector = get_subconnector_type(link);
175 
176 	drm_object_property_set_value(&connector->base,
177 			connector->dev->mode_config.dp_subconnector_property,
178 			subconnector);
179 }
180 
181 /*
182  * initializes drm_device display related structures, based on the information
183  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
184  * drm_encoder, drm_mode_config
185  *
186  * Returns 0 on success
187  */
188 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
189 /* removes and deallocates the drm structures, created by the above function */
190 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191 
192 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193 				struct drm_plane *plane,
194 				unsigned long possible_crtcs,
195 				const struct dc_plane_cap *plane_cap);
196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197 			       struct drm_plane *plane,
198 			       uint32_t link_index);
199 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
201 				    uint32_t link_index,
202 				    struct amdgpu_encoder *amdgpu_encoder);
203 static int amdgpu_dm_encoder_init(struct drm_device *dev,
204 				  struct amdgpu_encoder *aencoder,
205 				  uint32_t link_index);
206 
207 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208 
209 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210 
211 static int amdgpu_dm_atomic_check(struct drm_device *dev,
212 				  struct drm_atomic_state *state);
213 
214 static void handle_cursor_update(struct drm_plane *plane,
215 				 struct drm_plane_state *old_plane_state);
216 
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 
220 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
221 static void handle_hpd_rx_irq(void *param);
222 
223 static bool
224 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
225 				 struct drm_crtc_state *new_crtc_state);
226 /*
227  * dm_vblank_get_counter
228  *
229  * @brief
230  * Get counter for number of vertical blanks
231  *
232  * @param
233  * struct amdgpu_device *adev - [in] desired amdgpu device
234  * int disp_idx - [in] which CRTC to get the counter from
235  *
236  * @return
237  * Counter for vertical blanks
238  */
239 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
240 {
241 	if (crtc >= adev->mode_info.num_crtc)
242 		return 0;
243 	else {
244 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
245 
246 		if (acrtc->dm_irq_params.stream == NULL) {
247 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
248 				  crtc);
249 			return 0;
250 		}
251 
252 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
253 	}
254 }
255 
256 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
257 				  u32 *vbl, u32 *position)
258 {
259 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
260 
261 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262 		return -EINVAL;
263 	else {
264 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
265 
266 		if (acrtc->dm_irq_params.stream ==  NULL) {
267 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
268 				  crtc);
269 			return 0;
270 		}
271 
272 		/*
273 		 * TODO rework base driver to use values directly.
274 		 * for now parse it back into reg-format
275 		 */
276 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
277 					 &v_blank_start,
278 					 &v_blank_end,
279 					 &h_position,
280 					 &v_position);
281 
282 		*position = v_position | (h_position << 16);
283 		*vbl = v_blank_start | (v_blank_end << 16);
284 	}
285 
286 	return 0;
287 }
288 
289 static bool dm_is_idle(void *handle)
290 {
291 	/* XXX todo */
292 	return true;
293 }
294 
295 static int dm_wait_for_idle(void *handle)
296 {
297 	/* XXX todo */
298 	return 0;
299 }
300 
301 static bool dm_check_soft_reset(void *handle)
302 {
303 	return false;
304 }
305 
306 static int dm_soft_reset(void *handle)
307 {
308 	/* XXX todo */
309 	return 0;
310 }
311 
312 static struct amdgpu_crtc *
313 get_crtc_by_otg_inst(struct amdgpu_device *adev,
314 		     int otg_inst)
315 {
316 	struct drm_device *dev = adev_to_drm(adev);
317 	struct drm_crtc *crtc;
318 	struct amdgpu_crtc *amdgpu_crtc;
319 
320 	if (WARN_ON(otg_inst == -1))
321 		return adev->mode_info.crtcs[0];
322 
323 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324 		amdgpu_crtc = to_amdgpu_crtc(crtc);
325 
326 		if (amdgpu_crtc->otg_inst == otg_inst)
327 			return amdgpu_crtc;
328 	}
329 
330 	return NULL;
331 }
332 
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334 {
335 	return acrtc->dm_irq_params.freesync_config.state ==
336 		       VRR_STATE_ACTIVE_VARIABLE ||
337 	       acrtc->dm_irq_params.freesync_config.state ==
338 		       VRR_STATE_ACTIVE_FIXED;
339 }
340 
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342 {
343 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345 }
346 
347 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
348 					      struct dm_crtc_state *new_state)
349 {
350 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
351 		return true;
352 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
353 		return true;
354 	else
355 		return false;
356 }
357 
358 /**
359  * dm_pflip_high_irq() - Handle pageflip interrupt
360  * @interrupt_params: ignored
361  *
362  * Handles the pageflip interrupt by notifying all interested parties
363  * that the pageflip has been completed.
364  */
365 static void dm_pflip_high_irq(void *interrupt_params)
366 {
367 	struct amdgpu_crtc *amdgpu_crtc;
368 	struct common_irq_params *irq_params = interrupt_params;
369 	struct amdgpu_device *adev = irq_params->adev;
370 	unsigned long flags;
371 	struct drm_pending_vblank_event *e;
372 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
373 	bool vrr_active;
374 
375 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376 
377 	/* IRQ could occur when in initial stage */
378 	/* TODO work and BO cleanup */
379 	if (amdgpu_crtc == NULL) {
380 		DC_LOG_PFLIP("CRTC is null, returning.\n");
381 		return;
382 	}
383 
384 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385 
386 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
387 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
388 						 amdgpu_crtc->pflip_status,
389 						 AMDGPU_FLIP_SUBMITTED,
390 						 amdgpu_crtc->crtc_id,
391 						 amdgpu_crtc);
392 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
393 		return;
394 	}
395 
396 	/* page flip completed. */
397 	e = amdgpu_crtc->event;
398 	amdgpu_crtc->event = NULL;
399 
400 	WARN_ON(!e);
401 
402 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
403 
404 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
405 	if (!vrr_active ||
406 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407 				      &v_blank_end, &hpos, &vpos) ||
408 	    (vpos < v_blank_start)) {
409 		/* Update to correct count and vblank timestamp if racing with
410 		 * vblank irq. This also updates to the correct vblank timestamp
411 		 * even in VRR mode, as scanout is past the front-porch atm.
412 		 */
413 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
414 
415 		/* Wake up userspace by sending the pageflip event with proper
416 		 * count and timestamp of vblank of flip completion.
417 		 */
418 		if (e) {
419 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
420 
421 			/* Event sent, so done with vblank for this flip */
422 			drm_crtc_vblank_put(&amdgpu_crtc->base);
423 		}
424 	} else if (e) {
425 		/* VRR active and inside front-porch: vblank count and
426 		 * timestamp for pageflip event will only be up to date after
427 		 * drm_crtc_handle_vblank() has been executed from late vblank
428 		 * irq handler after start of back-porch (vline 0). We queue the
429 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
430 		 * updated timestamp and count, once it runs after us.
431 		 *
432 		 * We need to open-code this instead of using the helper
433 		 * drm_crtc_arm_vblank_event(), as that helper would
434 		 * call drm_crtc_accurate_vblank_count(), which we must
435 		 * not call in VRR mode while we are in front-porch!
436 		 */
437 
438 		/* sequence will be replaced by real count during send-out. */
439 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440 		e->pipe = amdgpu_crtc->crtc_id;
441 
442 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
443 		e = NULL;
444 	}
445 
446 	/* Keep track of vblank of this flip for flip throttling. We use the
447 	 * cooked hw counter, as that one incremented at start of this vblank
448 	 * of pageflip completion, so last_flip_vblank is the forbidden count
449 	 * for queueing new pageflips if vsync + VRR is enabled.
450 	 */
451 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
452 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
453 
454 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
456 
457 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
459 		     vrr_active, (int) !e);
460 }
461 
462 static void dm_vupdate_high_irq(void *interrupt_params)
463 {
464 	struct common_irq_params *irq_params = interrupt_params;
465 	struct amdgpu_device *adev = irq_params->adev;
466 	struct amdgpu_crtc *acrtc;
467 	struct drm_device *drm_dev;
468 	struct drm_vblank_crtc *vblank;
469 	ktime_t frame_duration_ns, previous_timestamp;
470 	unsigned long flags;
471 	int vrr_active;
472 
473 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474 
475 	if (acrtc) {
476 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477 		drm_dev = acrtc->base.dev;
478 		vblank = &drm_dev->vblank[acrtc->base.index];
479 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480 		frame_duration_ns = vblank->time - previous_timestamp;
481 
482 		if (frame_duration_ns > 0) {
483 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
484 						frame_duration_ns,
485 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
487 		}
488 
489 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
490 			      acrtc->crtc_id,
491 			      vrr_active);
492 
493 		/* Core vblank handling is done here after end of front-porch in
494 		 * vrr mode, as vblank timestamping will give valid results
495 		 * while now done after front-porch. This will also deliver
496 		 * page-flip completion events that have been queued to us
497 		 * if a pageflip happened inside front-porch.
498 		 */
499 		if (vrr_active) {
500 			drm_crtc_handle_vblank(&acrtc->base);
501 
502 			/* BTR processing for pre-DCE12 ASICs */
503 			if (acrtc->dm_irq_params.stream &&
504 			    adev->family < AMDGPU_FAMILY_AI) {
505 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506 				mod_freesync_handle_v_update(
507 				    adev->dm.freesync_module,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params);
510 
511 				dc_stream_adjust_vmin_vmax(
512 				    adev->dm.dc,
513 				    acrtc->dm_irq_params.stream,
514 				    &acrtc->dm_irq_params.vrr_params.adjust);
515 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
516 			}
517 		}
518 	}
519 }
520 
521 /**
522  * dm_crtc_high_irq() - Handles CRTC interrupt
523  * @interrupt_params: used for determining the CRTC instance
524  *
525  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
526  * event handler.
527  */
528 static void dm_crtc_high_irq(void *interrupt_params)
529 {
530 	struct common_irq_params *irq_params = interrupt_params;
531 	struct amdgpu_device *adev = irq_params->adev;
532 	struct amdgpu_crtc *acrtc;
533 	unsigned long flags;
534 	int vrr_active;
535 
536 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
537 	if (!acrtc)
538 		return;
539 
540 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
541 
542 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543 		      vrr_active, acrtc->dm_irq_params.active_planes);
544 
545 	/**
546 	 * Core vblank handling at start of front-porch is only possible
547 	 * in non-vrr mode, as only there vblank timestamping will give
548 	 * valid results while done in front-porch. Otherwise defer it
549 	 * to dm_vupdate_high_irq after end of front-porch.
550 	 */
551 	if (!vrr_active)
552 		drm_crtc_handle_vblank(&acrtc->base);
553 
554 	/**
555 	 * Following stuff must happen at start of vblank, for crc
556 	 * computation and below-the-range btr support in vrr mode.
557 	 */
558 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
559 
560 	/* BTR updates need to happen before VUPDATE on Vega and above. */
561 	if (adev->family < AMDGPU_FAMILY_AI)
562 		return;
563 
564 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
565 
566 	if (acrtc->dm_irq_params.stream &&
567 	    acrtc->dm_irq_params.vrr_params.supported &&
568 	    acrtc->dm_irq_params.freesync_config.state ==
569 		    VRR_STATE_ACTIVE_VARIABLE) {
570 		mod_freesync_handle_v_update(adev->dm.freesync_module,
571 					     acrtc->dm_irq_params.stream,
572 					     &acrtc->dm_irq_params.vrr_params);
573 
574 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575 					   &acrtc->dm_irq_params.vrr_params.adjust);
576 	}
577 
578 	/*
579 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
580 	 * In that case, pageflip completion interrupts won't fire and pageflip
581 	 * completion events won't get delivered. Prevent this by sending
582 	 * pending pageflip events from here if a flip is still pending.
583 	 *
584 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
585 	 * avoid race conditions between flip programming and completion,
586 	 * which could cause too early flip completion events.
587 	 */
588 	if (adev->family >= AMDGPU_FAMILY_RV &&
589 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590 	    acrtc->dm_irq_params.active_planes == 0) {
591 		if (acrtc->event) {
592 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
593 			acrtc->event = NULL;
594 			drm_crtc_vblank_put(&acrtc->base);
595 		}
596 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
597 	}
598 
599 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 }
601 
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
603 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
604 /**
605  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606  * DCN generation ASICs
607  * @interrupt_params: interrupt parameters
608  *
609  * Used to set crc window/read out crc value at vertical line 0 position
610  */
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
612 {
613 	struct common_irq_params *irq_params = interrupt_params;
614 	struct amdgpu_device *adev = irq_params->adev;
615 	struct amdgpu_crtc *acrtc;
616 
617 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618 
619 	if (!acrtc)
620 		return;
621 
622 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 }
624 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
625 
626 /**
627  * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
628  * @adev: amdgpu_device pointer
629  * @notify: dmub notification structure
630  *
631  * Dmub AUX or SET_CONFIG command completion processing callback
632  * Copies dmub notification to DM which is to be read by AUX command.
633  * issuing thread and also signals the event to wake up the thread.
634  */
635 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
636 {
637 	if (adev->dm.dmub_notify)
638 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
639 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
640 		complete(&adev->dm.dmub_aux_transfer_done);
641 }
642 
643 /**
644  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
645  * @adev: amdgpu_device pointer
646  * @notify: dmub notification structure
647  *
648  * Dmub Hpd interrupt processing callback. Gets displayindex through the
649  * ink index and calls helper to do the processing.
650  */
651 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
652 {
653 	struct amdgpu_dm_connector *aconnector;
654 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
655 	struct drm_connector *connector;
656 	struct drm_connector_list_iter iter;
657 	struct dc_link *link;
658 	uint8_t link_index = 0;
659 	struct drm_device *dev = adev->dm.ddev;
660 
661 	if (adev == NULL)
662 		return;
663 
664 	if (notify == NULL) {
665 		DRM_ERROR("DMUB HPD callback notification was NULL");
666 		return;
667 	}
668 
669 	if (notify->link_index > adev->dm.dc->link_count) {
670 		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
671 		return;
672 	}
673 
674 	link_index = notify->link_index;
675 	link = adev->dm.dc->links[link_index];
676 
677 	drm_connector_list_iter_begin(dev, &iter);
678 	drm_for_each_connector_iter(connector, &iter) {
679 		aconnector = to_amdgpu_dm_connector(connector);
680 		if (link && aconnector->dc_link == link) {
681 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
682 			hpd_aconnector = aconnector;
683 			break;
684 		}
685 	}
686 	drm_connector_list_iter_end(&iter);
687 
688 	if (hpd_aconnector) {
689 		if (notify->type == DMUB_NOTIFICATION_HPD)
690 			handle_hpd_irq_helper(hpd_aconnector);
691 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
692 			handle_hpd_rx_irq(hpd_aconnector);
693 	}
694 }
695 
696 /**
697  * register_dmub_notify_callback - Sets callback for DMUB notify
698  * @adev: amdgpu_device pointer
699  * @type: Type of dmub notification
700  * @callback: Dmub interrupt callback function
701  * @dmub_int_thread_offload: offload indicator
702  *
703  * API to register a dmub callback handler for a dmub notification
704  * Also sets indicator whether callback processing to be offloaded.
705  * to dmub interrupt handling thread
706  * Return: true if successfully registered, false if there is existing registration
707  */
708 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
709 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
710 {
711 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
712 		adev->dm.dmub_callback[type] = callback;
713 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
714 	} else
715 		return false;
716 
717 	return true;
718 }
719 
720 static void dm_handle_hpd_work(struct work_struct *work)
721 {
722 	struct dmub_hpd_work *dmub_hpd_wrk;
723 
724 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
725 
726 	if (!dmub_hpd_wrk->dmub_notify) {
727 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
728 		return;
729 	}
730 
731 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
732 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
733 		dmub_hpd_wrk->dmub_notify);
734 	}
735 
736 	kfree(dmub_hpd_wrk->dmub_notify);
737 	kfree(dmub_hpd_wrk);
738 
739 }
740 
741 #define DMUB_TRACE_MAX_READ 64
742 /**
743  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
744  * @interrupt_params: used for determining the Outbox instance
745  *
746  * Handles the Outbox Interrupt
747  * event handler.
748  */
749 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
750 {
751 	struct dmub_notification notify;
752 	struct common_irq_params *irq_params = interrupt_params;
753 	struct amdgpu_device *adev = irq_params->adev;
754 	struct amdgpu_display_manager *dm = &adev->dm;
755 	struct dmcub_trace_buf_entry entry = { 0 };
756 	uint32_t count = 0;
757 	struct dmub_hpd_work *dmub_hpd_wrk;
758 	struct dc_link *plink = NULL;
759 
760 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
761 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
762 
763 		do {
764 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
765 			if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
766 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
767 				continue;
768 			}
769 			if (!dm->dmub_callback[notify.type]) {
770 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
771 				continue;
772 			}
773 			if (dm->dmub_thread_offload[notify.type] == true) {
774 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
775 				if (!dmub_hpd_wrk) {
776 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
777 					return;
778 				}
779 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
780 				if (!dmub_hpd_wrk->dmub_notify) {
781 					kfree(dmub_hpd_wrk);
782 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
783 					return;
784 				}
785 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
786 				if (dmub_hpd_wrk->dmub_notify)
787 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
788 				dmub_hpd_wrk->adev = adev;
789 				if (notify.type == DMUB_NOTIFICATION_HPD) {
790 					plink = adev->dm.dc->links[notify.link_index];
791 					if (plink) {
792 						plink->hpd_status =
793 							notify.hpd_status ==
794 							DP_HPD_PLUG ? true : false;
795 					}
796 				}
797 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
798 			} else {
799 				dm->dmub_callback[notify.type](adev, &notify);
800 			}
801 		} while (notify.pending_notification);
802 	}
803 
804 
805 	do {
806 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
807 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
808 							entry.param0, entry.param1);
809 
810 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
811 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
812 		} else
813 			break;
814 
815 		count++;
816 
817 	} while (count <= DMUB_TRACE_MAX_READ);
818 
819 	if (count > DMUB_TRACE_MAX_READ)
820 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
821 }
822 #endif /* CONFIG_DRM_AMD_DC_DCN */
823 
824 static int dm_set_clockgating_state(void *handle,
825 		  enum amd_clockgating_state state)
826 {
827 	return 0;
828 }
829 
830 static int dm_set_powergating_state(void *handle,
831 		  enum amd_powergating_state state)
832 {
833 	return 0;
834 }
835 
836 /* Prototypes of private functions */
837 static int dm_early_init(void* handle);
838 
839 /* Allocate memory for FBC compressed data  */
840 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
841 {
842 	struct drm_device *dev = connector->dev;
843 	struct amdgpu_device *adev = drm_to_adev(dev);
844 	struct dm_compressor_info *compressor = &adev->dm.compressor;
845 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
846 	struct drm_display_mode *mode;
847 	unsigned long max_size = 0;
848 
849 	if (adev->dm.dc->fbc_compressor == NULL)
850 		return;
851 
852 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
853 		return;
854 
855 	if (compressor->bo_ptr)
856 		return;
857 
858 
859 	list_for_each_entry(mode, &connector->modes, head) {
860 		if (max_size < mode->htotal * mode->vtotal)
861 			max_size = mode->htotal * mode->vtotal;
862 	}
863 
864 	if (max_size) {
865 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
866 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
867 			    &compressor->gpu_addr, &compressor->cpu_addr);
868 
869 		if (r)
870 			DRM_ERROR("DM: Failed to initialize FBC\n");
871 		else {
872 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
873 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
874 		}
875 
876 	}
877 
878 }
879 
880 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
881 					  int pipe, bool *enabled,
882 					  unsigned char *buf, int max_bytes)
883 {
884 	struct drm_device *dev = dev_get_drvdata(kdev);
885 	struct amdgpu_device *adev = drm_to_adev(dev);
886 	struct drm_connector *connector;
887 	struct drm_connector_list_iter conn_iter;
888 	struct amdgpu_dm_connector *aconnector;
889 	int ret = 0;
890 
891 	*enabled = false;
892 
893 	mutex_lock(&adev->dm.audio_lock);
894 
895 	drm_connector_list_iter_begin(dev, &conn_iter);
896 	drm_for_each_connector_iter(connector, &conn_iter) {
897 		aconnector = to_amdgpu_dm_connector(connector);
898 		if (aconnector->audio_inst != port)
899 			continue;
900 
901 		*enabled = true;
902 		ret = drm_eld_size(connector->eld);
903 		memcpy(buf, connector->eld, min(max_bytes, ret));
904 
905 		break;
906 	}
907 	drm_connector_list_iter_end(&conn_iter);
908 
909 	mutex_unlock(&adev->dm.audio_lock);
910 
911 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
912 
913 	return ret;
914 }
915 
916 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
917 	.get_eld = amdgpu_dm_audio_component_get_eld,
918 };
919 
920 static int amdgpu_dm_audio_component_bind(struct device *kdev,
921 				       struct device *hda_kdev, void *data)
922 {
923 	struct drm_device *dev = dev_get_drvdata(kdev);
924 	struct amdgpu_device *adev = drm_to_adev(dev);
925 	struct drm_audio_component *acomp = data;
926 
927 	acomp->ops = &amdgpu_dm_audio_component_ops;
928 	acomp->dev = kdev;
929 	adev->dm.audio_component = acomp;
930 
931 	return 0;
932 }
933 
934 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
935 					  struct device *hda_kdev, void *data)
936 {
937 	struct drm_device *dev = dev_get_drvdata(kdev);
938 	struct amdgpu_device *adev = drm_to_adev(dev);
939 	struct drm_audio_component *acomp = data;
940 
941 	acomp->ops = NULL;
942 	acomp->dev = NULL;
943 	adev->dm.audio_component = NULL;
944 }
945 
946 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
947 	.bind	= amdgpu_dm_audio_component_bind,
948 	.unbind	= amdgpu_dm_audio_component_unbind,
949 };
950 
951 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
952 {
953 	int i, ret;
954 
955 	if (!amdgpu_audio)
956 		return 0;
957 
958 	adev->mode_info.audio.enabled = true;
959 
960 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
961 
962 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
963 		adev->mode_info.audio.pin[i].channels = -1;
964 		adev->mode_info.audio.pin[i].rate = -1;
965 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
966 		adev->mode_info.audio.pin[i].status_bits = 0;
967 		adev->mode_info.audio.pin[i].category_code = 0;
968 		adev->mode_info.audio.pin[i].connected = false;
969 		adev->mode_info.audio.pin[i].id =
970 			adev->dm.dc->res_pool->audios[i]->inst;
971 		adev->mode_info.audio.pin[i].offset = 0;
972 	}
973 
974 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
975 	if (ret < 0)
976 		return ret;
977 
978 	adev->dm.audio_registered = true;
979 
980 	return 0;
981 }
982 
983 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
984 {
985 	if (!amdgpu_audio)
986 		return;
987 
988 	if (!adev->mode_info.audio.enabled)
989 		return;
990 
991 	if (adev->dm.audio_registered) {
992 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
993 		adev->dm.audio_registered = false;
994 	}
995 
996 	/* TODO: Disable audio? */
997 
998 	adev->mode_info.audio.enabled = false;
999 }
1000 
1001 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1002 {
1003 	struct drm_audio_component *acomp = adev->dm.audio_component;
1004 
1005 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1006 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1007 
1008 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1009 						 pin, -1);
1010 	}
1011 }
1012 
1013 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1014 {
1015 	const struct dmcub_firmware_header_v1_0 *hdr;
1016 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1017 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1018 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1019 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1020 	struct abm *abm = adev->dm.dc->res_pool->abm;
1021 	struct dmub_srv_hw_params hw_params;
1022 	enum dmub_status status;
1023 	const unsigned char *fw_inst_const, *fw_bss_data;
1024 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1025 	bool has_hw_support;
1026 	struct dc *dc = adev->dm.dc;
1027 
1028 	if (!dmub_srv)
1029 		/* DMUB isn't supported on the ASIC. */
1030 		return 0;
1031 
1032 	if (!fb_info) {
1033 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1034 		return -EINVAL;
1035 	}
1036 
1037 	if (!dmub_fw) {
1038 		/* Firmware required for DMUB support. */
1039 		DRM_ERROR("No firmware provided for DMUB.\n");
1040 		return -EINVAL;
1041 	}
1042 
1043 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1044 	if (status != DMUB_STATUS_OK) {
1045 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1046 		return -EINVAL;
1047 	}
1048 
1049 	if (!has_hw_support) {
1050 		DRM_INFO("DMUB unsupported on ASIC\n");
1051 		return 0;
1052 	}
1053 
1054 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1055 	status = dmub_srv_hw_reset(dmub_srv);
1056 	if (status != DMUB_STATUS_OK)
1057 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1058 
1059 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1060 
1061 	fw_inst_const = dmub_fw->data +
1062 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1063 			PSP_HEADER_BYTES;
1064 
1065 	fw_bss_data = dmub_fw->data +
1066 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1067 		      le32_to_cpu(hdr->inst_const_bytes);
1068 
1069 	/* Copy firmware and bios info into FB memory. */
1070 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1071 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1072 
1073 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1074 
1075 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1076 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1077 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1078 	 * will be done by dm_dmub_hw_init
1079 	 */
1080 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1081 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1082 				fw_inst_const_size);
1083 	}
1084 
1085 	if (fw_bss_data_size)
1086 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1087 		       fw_bss_data, fw_bss_data_size);
1088 
1089 	/* Copy firmware bios info into FB memory. */
1090 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1091 	       adev->bios_size);
1092 
1093 	/* Reset regions that need to be reset. */
1094 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1095 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1096 
1097 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1098 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1099 
1100 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1101 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1102 
1103 	/* Initialize hardware. */
1104 	memset(&hw_params, 0, sizeof(hw_params));
1105 	hw_params.fb_base = adev->gmc.fb_start;
1106 	hw_params.fb_offset = adev->gmc.aper_base;
1107 
1108 	/* backdoor load firmware and trigger dmub running */
1109 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1110 		hw_params.load_inst_const = true;
1111 
1112 	if (dmcu)
1113 		hw_params.psp_version = dmcu->psp_version;
1114 
1115 	for (i = 0; i < fb_info->num_fb; ++i)
1116 		hw_params.fb[i] = &fb_info->fb[i];
1117 
1118 	switch (adev->asic_type) {
1119 	case CHIP_YELLOW_CARP:
1120 		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1121 			hw_params.dpia_supported = true;
1122 #if defined(CONFIG_DRM_AMD_DC_DCN)
1123 			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1124 #endif
1125 		}
1126 		break;
1127 	default:
1128 		break;
1129 	}
1130 
1131 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1132 	if (status != DMUB_STATUS_OK) {
1133 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1134 		return -EINVAL;
1135 	}
1136 
1137 	/* Wait for firmware load to finish. */
1138 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1139 	if (status != DMUB_STATUS_OK)
1140 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1141 
1142 	/* Init DMCU and ABM if available. */
1143 	if (dmcu && abm) {
1144 		dmcu->funcs->dmcu_init(dmcu);
1145 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1146 	}
1147 
1148 	if (!adev->dm.dc->ctx->dmub_srv)
1149 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1150 	if (!adev->dm.dc->ctx->dmub_srv) {
1151 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1152 		return -ENOMEM;
1153 	}
1154 
1155 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1156 		 adev->dm.dmcub_fw_version);
1157 
1158 	return 0;
1159 }
1160 
1161 #if defined(CONFIG_DRM_AMD_DC_DCN)
1162 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1163 {
1164 	uint64_t pt_base;
1165 	uint32_t logical_addr_low;
1166 	uint32_t logical_addr_high;
1167 	uint32_t agp_base, agp_bot, agp_top;
1168 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1169 
1170 	memset(pa_config, 0, sizeof(*pa_config));
1171 
1172 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1173 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1174 
1175 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1176 		/*
1177 		 * Raven2 has a HW issue that it is unable to use the vram which
1178 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1179 		 * workaround that increase system aperture high address (add 1)
1180 		 * to get rid of the VM fault and hardware hang.
1181 		 */
1182 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1183 	else
1184 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1185 
1186 	agp_base = 0;
1187 	agp_bot = adev->gmc.agp_start >> 24;
1188 	agp_top = adev->gmc.agp_end >> 24;
1189 
1190 
1191 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1192 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1193 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1194 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1195 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1196 	page_table_base.low_part = lower_32_bits(pt_base);
1197 
1198 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1199 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1200 
1201 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1202 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1203 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1204 
1205 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1206 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1207 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1208 
1209 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1210 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1211 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1212 
1213 	pa_config->is_hvm_enabled = 0;
1214 
1215 }
1216 #endif
1217 #if defined(CONFIG_DRM_AMD_DC_DCN)
1218 static void vblank_control_worker(struct work_struct *work)
1219 {
1220 	struct vblank_control_work *vblank_work =
1221 		container_of(work, struct vblank_control_work, work);
1222 	struct amdgpu_display_manager *dm = vblank_work->dm;
1223 
1224 	mutex_lock(&dm->dc_lock);
1225 
1226 	if (vblank_work->enable)
1227 		dm->active_vblank_irq_count++;
1228 	else if(dm->active_vblank_irq_count)
1229 		dm->active_vblank_irq_count--;
1230 
1231 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1232 
1233 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1234 
1235 	/* Control PSR based on vblank requirements from OS */
1236 	if (vblank_work->stream && vblank_work->stream->link) {
1237 		if (vblank_work->enable) {
1238 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1239 				amdgpu_dm_psr_disable(vblank_work->stream);
1240 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1241 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1242 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1243 			amdgpu_dm_psr_enable(vblank_work->stream);
1244 		}
1245 	}
1246 
1247 	mutex_unlock(&dm->dc_lock);
1248 
1249 	dc_stream_release(vblank_work->stream);
1250 
1251 	kfree(vblank_work);
1252 }
1253 
1254 #endif
1255 
1256 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1257 {
1258 	struct hpd_rx_irq_offload_work *offload_work;
1259 	struct amdgpu_dm_connector *aconnector;
1260 	struct dc_link *dc_link;
1261 	struct amdgpu_device *adev;
1262 	enum dc_connection_type new_connection_type = dc_connection_none;
1263 	unsigned long flags;
1264 
1265 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1266 	aconnector = offload_work->offload_wq->aconnector;
1267 
1268 	if (!aconnector) {
1269 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1270 		goto skip;
1271 	}
1272 
1273 	adev = drm_to_adev(aconnector->base.dev);
1274 	dc_link = aconnector->dc_link;
1275 
1276 	mutex_lock(&aconnector->hpd_lock);
1277 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1278 		DRM_ERROR("KMS: Failed to detect connector\n");
1279 	mutex_unlock(&aconnector->hpd_lock);
1280 
1281 	if (new_connection_type == dc_connection_none)
1282 		goto skip;
1283 
1284 	if (amdgpu_in_reset(adev))
1285 		goto skip;
1286 
1287 	mutex_lock(&adev->dm.dc_lock);
1288 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1289 		dc_link_dp_handle_automated_test(dc_link);
1290 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1291 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1292 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1293 		dc_link_dp_handle_link_loss(dc_link);
1294 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1295 		offload_work->offload_wq->is_handling_link_loss = false;
1296 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1297 	}
1298 	mutex_unlock(&adev->dm.dc_lock);
1299 
1300 skip:
1301 	kfree(offload_work);
1302 
1303 }
1304 
1305 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1306 {
1307 	int max_caps = dc->caps.max_links;
1308 	int i = 0;
1309 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1310 
1311 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1312 
1313 	if (!hpd_rx_offload_wq)
1314 		return NULL;
1315 
1316 
1317 	for (i = 0; i < max_caps; i++) {
1318 		hpd_rx_offload_wq[i].wq =
1319 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1320 
1321 		if (hpd_rx_offload_wq[i].wq == NULL) {
1322 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1323 			return NULL;
1324 		}
1325 
1326 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1327 	}
1328 
1329 	return hpd_rx_offload_wq;
1330 }
1331 
1332 struct amdgpu_stutter_quirk {
1333 	u16 chip_vendor;
1334 	u16 chip_device;
1335 	u16 subsys_vendor;
1336 	u16 subsys_device;
1337 	u8 revision;
1338 };
1339 
1340 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1341 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1342 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1343 	{ 0, 0, 0, 0, 0 },
1344 };
1345 
1346 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1347 {
1348 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1349 
1350 	while (p && p->chip_device != 0) {
1351 		if (pdev->vendor == p->chip_vendor &&
1352 		    pdev->device == p->chip_device &&
1353 		    pdev->subsystem_vendor == p->subsys_vendor &&
1354 		    pdev->subsystem_device == p->subsys_device &&
1355 		    pdev->revision == p->revision) {
1356 			return true;
1357 		}
1358 		++p;
1359 	}
1360 	return false;
1361 }
1362 
1363 static int amdgpu_dm_init(struct amdgpu_device *adev)
1364 {
1365 	struct dc_init_data init_data;
1366 #ifdef CONFIG_DRM_AMD_DC_HDCP
1367 	struct dc_callback_init init_params;
1368 #endif
1369 	int r;
1370 
1371 	adev->dm.ddev = adev_to_drm(adev);
1372 	adev->dm.adev = adev;
1373 
1374 	/* Zero all the fields */
1375 	memset(&init_data, 0, sizeof(init_data));
1376 #ifdef CONFIG_DRM_AMD_DC_HDCP
1377 	memset(&init_params, 0, sizeof(init_params));
1378 #endif
1379 
1380 	mutex_init(&adev->dm.dc_lock);
1381 	mutex_init(&adev->dm.audio_lock);
1382 #if defined(CONFIG_DRM_AMD_DC_DCN)
1383 	spin_lock_init(&adev->dm.vblank_lock);
1384 #endif
1385 
1386 	if(amdgpu_dm_irq_init(adev)) {
1387 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1388 		goto error;
1389 	}
1390 
1391 	init_data.asic_id.chip_family = adev->family;
1392 
1393 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1394 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1395 	init_data.asic_id.chip_id = adev->pdev->device;
1396 
1397 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1398 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1399 	init_data.asic_id.atombios_base_address =
1400 		adev->mode_info.atom_context->bios;
1401 
1402 	init_data.driver = adev;
1403 
1404 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1405 
1406 	if (!adev->dm.cgs_device) {
1407 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1408 		goto error;
1409 	}
1410 
1411 	init_data.cgs_device = adev->dm.cgs_device;
1412 
1413 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1414 
1415 	switch (adev->asic_type) {
1416 	case CHIP_CARRIZO:
1417 	case CHIP_STONEY:
1418 		init_data.flags.gpu_vm_support = true;
1419 		break;
1420 	default:
1421 		switch (adev->ip_versions[DCE_HWIP][0]) {
1422 		case IP_VERSION(2, 1, 0):
1423 			init_data.flags.gpu_vm_support = true;
1424 			switch (adev->dm.dmcub_fw_version) {
1425 			case 0: /* development */
1426 			case 0x1: /* linux-firmware.git hash 6d9f399 */
1427 			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1428 				init_data.flags.disable_dmcu = false;
1429 				break;
1430 			default:
1431 				init_data.flags.disable_dmcu = true;
1432 			}
1433 			break;
1434 		case IP_VERSION(1, 0, 0):
1435 		case IP_VERSION(1, 0, 1):
1436 		case IP_VERSION(3, 0, 1):
1437 		case IP_VERSION(3, 1, 2):
1438 		case IP_VERSION(3, 1, 3):
1439 			init_data.flags.gpu_vm_support = true;
1440 			break;
1441 		case IP_VERSION(2, 0, 3):
1442 			init_data.flags.disable_dmcu = true;
1443 			break;
1444 		default:
1445 			break;
1446 		}
1447 		break;
1448 	}
1449 
1450 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1451 		init_data.flags.fbc_support = true;
1452 
1453 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1454 		init_data.flags.multi_mon_pp_mclk_switch = true;
1455 
1456 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1457 		init_data.flags.disable_fractional_pwm = true;
1458 
1459 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1460 		init_data.flags.edp_no_power_sequencing = true;
1461 
1462 	init_data.flags.power_down_display_on_boot = true;
1463 
1464 	INIT_LIST_HEAD(&adev->dm.da_list);
1465 	/* Display Core create. */
1466 	adev->dm.dc = dc_create(&init_data);
1467 
1468 	if (adev->dm.dc) {
1469 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1470 	} else {
1471 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1472 		goto error;
1473 	}
1474 
1475 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1476 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1477 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1478 	}
1479 
1480 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1481 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1482 	if (dm_should_disable_stutter(adev->pdev))
1483 		adev->dm.dc->debug.disable_stutter = true;
1484 
1485 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1486 		adev->dm.dc->debug.disable_stutter = true;
1487 
1488 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1489 		adev->dm.dc->debug.disable_dsc = true;
1490 
1491 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1492 		adev->dm.dc->debug.disable_clock_gate = true;
1493 
1494 	r = dm_dmub_hw_init(adev);
1495 	if (r) {
1496 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1497 		goto error;
1498 	}
1499 
1500 	dc_hardware_init(adev->dm.dc);
1501 
1502 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1503 	if (!adev->dm.hpd_rx_offload_wq) {
1504 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1505 		goto error;
1506 	}
1507 
1508 #if defined(CONFIG_DRM_AMD_DC_DCN)
1509 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1510 		struct dc_phy_addr_space_config pa_config;
1511 
1512 		mmhub_read_system_context(adev, &pa_config);
1513 
1514 		// Call the DC init_memory func
1515 		dc_setup_system_context(adev->dm.dc, &pa_config);
1516 	}
1517 #endif
1518 
1519 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1520 	if (!adev->dm.freesync_module) {
1521 		DRM_ERROR(
1522 		"amdgpu: failed to initialize freesync_module.\n");
1523 	} else
1524 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1525 				adev->dm.freesync_module);
1526 
1527 	amdgpu_dm_init_color_mod();
1528 
1529 #if defined(CONFIG_DRM_AMD_DC_DCN)
1530 	if (adev->dm.dc->caps.max_links > 0) {
1531 		adev->dm.vblank_control_workqueue =
1532 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1533 		if (!adev->dm.vblank_control_workqueue)
1534 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1535 	}
1536 #endif
1537 
1538 #ifdef CONFIG_DRM_AMD_DC_HDCP
1539 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1540 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1541 
1542 		if (!adev->dm.hdcp_workqueue)
1543 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1544 		else
1545 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1546 
1547 		dc_init_callbacks(adev->dm.dc, &init_params);
1548 	}
1549 #endif
1550 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1551 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1552 #endif
1553 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1554 		init_completion(&adev->dm.dmub_aux_transfer_done);
1555 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1556 		if (!adev->dm.dmub_notify) {
1557 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1558 			goto error;
1559 		}
1560 
1561 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1562 		if (!adev->dm.delayed_hpd_wq) {
1563 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1564 			goto error;
1565 		}
1566 
1567 		amdgpu_dm_outbox_init(adev);
1568 #if defined(CONFIG_DRM_AMD_DC_DCN)
1569 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1570 			dmub_aux_setconfig_callback, false)) {
1571 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1572 			goto error;
1573 		}
1574 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1575 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1576 			goto error;
1577 		}
1578 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1579 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1580 			goto error;
1581 		}
1582 #endif /* CONFIG_DRM_AMD_DC_DCN */
1583 	}
1584 
1585 	if (amdgpu_dm_initialize_drm_device(adev)) {
1586 		DRM_ERROR(
1587 		"amdgpu: failed to initialize sw for display support.\n");
1588 		goto error;
1589 	}
1590 
1591 	/* create fake encoders for MST */
1592 	dm_dp_create_fake_mst_encoders(adev);
1593 
1594 	/* TODO: Add_display_info? */
1595 
1596 	/* TODO use dynamic cursor width */
1597 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1598 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1599 
1600 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1601 		DRM_ERROR(
1602 		"amdgpu: failed to initialize sw for display support.\n");
1603 		goto error;
1604 	}
1605 
1606 
1607 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1608 
1609 	return 0;
1610 error:
1611 	amdgpu_dm_fini(adev);
1612 
1613 	return -EINVAL;
1614 }
1615 
1616 static int amdgpu_dm_early_fini(void *handle)
1617 {
1618 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1619 
1620 	amdgpu_dm_audio_fini(adev);
1621 
1622 	return 0;
1623 }
1624 
1625 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1626 {
1627 	int i;
1628 
1629 #if defined(CONFIG_DRM_AMD_DC_DCN)
1630 	if (adev->dm.vblank_control_workqueue) {
1631 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1632 		adev->dm.vblank_control_workqueue = NULL;
1633 	}
1634 #endif
1635 
1636 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1637 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1638 	}
1639 
1640 	amdgpu_dm_destroy_drm_device(&adev->dm);
1641 
1642 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1643 	if (adev->dm.crc_rd_wrk) {
1644 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1645 		kfree(adev->dm.crc_rd_wrk);
1646 		adev->dm.crc_rd_wrk = NULL;
1647 	}
1648 #endif
1649 #ifdef CONFIG_DRM_AMD_DC_HDCP
1650 	if (adev->dm.hdcp_workqueue) {
1651 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1652 		adev->dm.hdcp_workqueue = NULL;
1653 	}
1654 
1655 	if (adev->dm.dc)
1656 		dc_deinit_callbacks(adev->dm.dc);
1657 #endif
1658 
1659 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1660 
1661 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1662 		kfree(adev->dm.dmub_notify);
1663 		adev->dm.dmub_notify = NULL;
1664 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1665 		adev->dm.delayed_hpd_wq = NULL;
1666 	}
1667 
1668 	if (adev->dm.dmub_bo)
1669 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1670 				      &adev->dm.dmub_bo_gpu_addr,
1671 				      &adev->dm.dmub_bo_cpu_addr);
1672 
1673 	if (adev->dm.hpd_rx_offload_wq) {
1674 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1675 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1676 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1677 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1678 			}
1679 		}
1680 
1681 		kfree(adev->dm.hpd_rx_offload_wq);
1682 		adev->dm.hpd_rx_offload_wq = NULL;
1683 	}
1684 
1685 	/* DC Destroy TODO: Replace destroy DAL */
1686 	if (adev->dm.dc)
1687 		dc_destroy(&adev->dm.dc);
1688 	/*
1689 	 * TODO: pageflip, vlank interrupt
1690 	 *
1691 	 * amdgpu_dm_irq_fini(adev);
1692 	 */
1693 
1694 	if (adev->dm.cgs_device) {
1695 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1696 		adev->dm.cgs_device = NULL;
1697 	}
1698 	if (adev->dm.freesync_module) {
1699 		mod_freesync_destroy(adev->dm.freesync_module);
1700 		adev->dm.freesync_module = NULL;
1701 	}
1702 
1703 	mutex_destroy(&adev->dm.audio_lock);
1704 	mutex_destroy(&adev->dm.dc_lock);
1705 
1706 	return;
1707 }
1708 
1709 static int load_dmcu_fw(struct amdgpu_device *adev)
1710 {
1711 	const char *fw_name_dmcu = NULL;
1712 	int r;
1713 	const struct dmcu_firmware_header_v1_0 *hdr;
1714 
1715 	switch(adev->asic_type) {
1716 #if defined(CONFIG_DRM_AMD_DC_SI)
1717 	case CHIP_TAHITI:
1718 	case CHIP_PITCAIRN:
1719 	case CHIP_VERDE:
1720 	case CHIP_OLAND:
1721 #endif
1722 	case CHIP_BONAIRE:
1723 	case CHIP_HAWAII:
1724 	case CHIP_KAVERI:
1725 	case CHIP_KABINI:
1726 	case CHIP_MULLINS:
1727 	case CHIP_TONGA:
1728 	case CHIP_FIJI:
1729 	case CHIP_CARRIZO:
1730 	case CHIP_STONEY:
1731 	case CHIP_POLARIS11:
1732 	case CHIP_POLARIS10:
1733 	case CHIP_POLARIS12:
1734 	case CHIP_VEGAM:
1735 	case CHIP_VEGA10:
1736 	case CHIP_VEGA12:
1737 	case CHIP_VEGA20:
1738 		return 0;
1739 	case CHIP_NAVI12:
1740 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1741 		break;
1742 	case CHIP_RAVEN:
1743 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1744 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1745 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1746 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1747 		else
1748 			return 0;
1749 		break;
1750 	default:
1751 		switch (adev->ip_versions[DCE_HWIP][0]) {
1752 		case IP_VERSION(2, 0, 2):
1753 		case IP_VERSION(2, 0, 3):
1754 		case IP_VERSION(2, 0, 0):
1755 		case IP_VERSION(2, 1, 0):
1756 		case IP_VERSION(3, 0, 0):
1757 		case IP_VERSION(3, 0, 2):
1758 		case IP_VERSION(3, 0, 3):
1759 		case IP_VERSION(3, 0, 1):
1760 		case IP_VERSION(3, 1, 2):
1761 		case IP_VERSION(3, 1, 3):
1762 			return 0;
1763 		default:
1764 			break;
1765 		}
1766 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1767 		return -EINVAL;
1768 	}
1769 
1770 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1771 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1772 		return 0;
1773 	}
1774 
1775 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1776 	if (r == -ENOENT) {
1777 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1778 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1779 		adev->dm.fw_dmcu = NULL;
1780 		return 0;
1781 	}
1782 	if (r) {
1783 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1784 			fw_name_dmcu);
1785 		return r;
1786 	}
1787 
1788 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1789 	if (r) {
1790 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1791 			fw_name_dmcu);
1792 		release_firmware(adev->dm.fw_dmcu);
1793 		adev->dm.fw_dmcu = NULL;
1794 		return r;
1795 	}
1796 
1797 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1798 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1799 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1800 	adev->firmware.fw_size +=
1801 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1802 
1803 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1804 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1805 	adev->firmware.fw_size +=
1806 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1807 
1808 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1809 
1810 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1811 
1812 	return 0;
1813 }
1814 
1815 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1816 {
1817 	struct amdgpu_device *adev = ctx;
1818 
1819 	return dm_read_reg(adev->dm.dc->ctx, address);
1820 }
1821 
1822 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1823 				     uint32_t value)
1824 {
1825 	struct amdgpu_device *adev = ctx;
1826 
1827 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1828 }
1829 
1830 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1831 {
1832 	struct dmub_srv_create_params create_params;
1833 	struct dmub_srv_region_params region_params;
1834 	struct dmub_srv_region_info region_info;
1835 	struct dmub_srv_fb_params fb_params;
1836 	struct dmub_srv_fb_info *fb_info;
1837 	struct dmub_srv *dmub_srv;
1838 	const struct dmcub_firmware_header_v1_0 *hdr;
1839 	const char *fw_name_dmub;
1840 	enum dmub_asic dmub_asic;
1841 	enum dmub_status status;
1842 	int r;
1843 
1844 	switch (adev->ip_versions[DCE_HWIP][0]) {
1845 	case IP_VERSION(2, 1, 0):
1846 		dmub_asic = DMUB_ASIC_DCN21;
1847 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1848 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1849 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1850 		break;
1851 	case IP_VERSION(3, 0, 0):
1852 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1853 			dmub_asic = DMUB_ASIC_DCN30;
1854 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1855 		} else {
1856 			dmub_asic = DMUB_ASIC_DCN30;
1857 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1858 		}
1859 		break;
1860 	case IP_VERSION(3, 0, 1):
1861 		dmub_asic = DMUB_ASIC_DCN301;
1862 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1863 		break;
1864 	case IP_VERSION(3, 0, 2):
1865 		dmub_asic = DMUB_ASIC_DCN302;
1866 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1867 		break;
1868 	case IP_VERSION(3, 0, 3):
1869 		dmub_asic = DMUB_ASIC_DCN303;
1870 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1871 		break;
1872 	case IP_VERSION(3, 1, 2):
1873 	case IP_VERSION(3, 1, 3):
1874 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1875 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1876 		break;
1877 
1878 	default:
1879 		/* ASIC doesn't support DMUB. */
1880 		return 0;
1881 	}
1882 
1883 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1884 	if (r) {
1885 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1886 		return 0;
1887 	}
1888 
1889 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1890 	if (r) {
1891 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1892 		return 0;
1893 	}
1894 
1895 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1896 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1897 
1898 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1899 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1900 			AMDGPU_UCODE_ID_DMCUB;
1901 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1902 			adev->dm.dmub_fw;
1903 		adev->firmware.fw_size +=
1904 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1905 
1906 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1907 			 adev->dm.dmcub_fw_version);
1908 	}
1909 
1910 
1911 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1912 	dmub_srv = adev->dm.dmub_srv;
1913 
1914 	if (!dmub_srv) {
1915 		DRM_ERROR("Failed to allocate DMUB service!\n");
1916 		return -ENOMEM;
1917 	}
1918 
1919 	memset(&create_params, 0, sizeof(create_params));
1920 	create_params.user_ctx = adev;
1921 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1922 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1923 	create_params.asic = dmub_asic;
1924 
1925 	/* Create the DMUB service. */
1926 	status = dmub_srv_create(dmub_srv, &create_params);
1927 	if (status != DMUB_STATUS_OK) {
1928 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1929 		return -EINVAL;
1930 	}
1931 
1932 	/* Calculate the size of all the regions for the DMUB service. */
1933 	memset(&region_params, 0, sizeof(region_params));
1934 
1935 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1936 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1937 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1938 	region_params.vbios_size = adev->bios_size;
1939 	region_params.fw_bss_data = region_params.bss_data_size ?
1940 		adev->dm.dmub_fw->data +
1941 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1942 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1943 	region_params.fw_inst_const =
1944 		adev->dm.dmub_fw->data +
1945 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1946 		PSP_HEADER_BYTES;
1947 
1948 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1949 					   &region_info);
1950 
1951 	if (status != DMUB_STATUS_OK) {
1952 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1953 		return -EINVAL;
1954 	}
1955 
1956 	/*
1957 	 * Allocate a framebuffer based on the total size of all the regions.
1958 	 * TODO: Move this into GART.
1959 	 */
1960 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1961 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1962 				    &adev->dm.dmub_bo_gpu_addr,
1963 				    &adev->dm.dmub_bo_cpu_addr);
1964 	if (r)
1965 		return r;
1966 
1967 	/* Rebase the regions on the framebuffer address. */
1968 	memset(&fb_params, 0, sizeof(fb_params));
1969 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1970 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1971 	fb_params.region_info = &region_info;
1972 
1973 	adev->dm.dmub_fb_info =
1974 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1975 	fb_info = adev->dm.dmub_fb_info;
1976 
1977 	if (!fb_info) {
1978 		DRM_ERROR(
1979 			"Failed to allocate framebuffer info for DMUB service!\n");
1980 		return -ENOMEM;
1981 	}
1982 
1983 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1984 	if (status != DMUB_STATUS_OK) {
1985 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1986 		return -EINVAL;
1987 	}
1988 
1989 	return 0;
1990 }
1991 
1992 static int dm_sw_init(void *handle)
1993 {
1994 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1995 	int r;
1996 
1997 	r = dm_dmub_sw_init(adev);
1998 	if (r)
1999 		return r;
2000 
2001 	return load_dmcu_fw(adev);
2002 }
2003 
2004 static int dm_sw_fini(void *handle)
2005 {
2006 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2007 
2008 	kfree(adev->dm.dmub_fb_info);
2009 	adev->dm.dmub_fb_info = NULL;
2010 
2011 	if (adev->dm.dmub_srv) {
2012 		dmub_srv_destroy(adev->dm.dmub_srv);
2013 		adev->dm.dmub_srv = NULL;
2014 	}
2015 
2016 	release_firmware(adev->dm.dmub_fw);
2017 	adev->dm.dmub_fw = NULL;
2018 
2019 	release_firmware(adev->dm.fw_dmcu);
2020 	adev->dm.fw_dmcu = NULL;
2021 
2022 	return 0;
2023 }
2024 
2025 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2026 {
2027 	struct amdgpu_dm_connector *aconnector;
2028 	struct drm_connector *connector;
2029 	struct drm_connector_list_iter iter;
2030 	int ret = 0;
2031 
2032 	drm_connector_list_iter_begin(dev, &iter);
2033 	drm_for_each_connector_iter(connector, &iter) {
2034 		aconnector = to_amdgpu_dm_connector(connector);
2035 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2036 		    aconnector->mst_mgr.aux) {
2037 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2038 					 aconnector,
2039 					 aconnector->base.base.id);
2040 
2041 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2042 			if (ret < 0) {
2043 				DRM_ERROR("DM_MST: Failed to start MST\n");
2044 				aconnector->dc_link->type =
2045 					dc_connection_single;
2046 				break;
2047 			}
2048 		}
2049 	}
2050 	drm_connector_list_iter_end(&iter);
2051 
2052 	return ret;
2053 }
2054 
2055 static int dm_late_init(void *handle)
2056 {
2057 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2058 
2059 	struct dmcu_iram_parameters params;
2060 	unsigned int linear_lut[16];
2061 	int i;
2062 	struct dmcu *dmcu = NULL;
2063 
2064 	dmcu = adev->dm.dc->res_pool->dmcu;
2065 
2066 	for (i = 0; i < 16; i++)
2067 		linear_lut[i] = 0xFFFF * i / 15;
2068 
2069 	params.set = 0;
2070 	params.backlight_ramping_override = false;
2071 	params.backlight_ramping_start = 0xCCCC;
2072 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2073 	params.backlight_lut_array_size = 16;
2074 	params.backlight_lut_array = linear_lut;
2075 
2076 	/* Min backlight level after ABM reduction,  Don't allow below 1%
2077 	 * 0xFFFF x 0.01 = 0x28F
2078 	 */
2079 	params.min_abm_backlight = 0x28F;
2080 	/* In the case where abm is implemented on dmcub,
2081 	* dmcu object will be null.
2082 	* ABM 2.4 and up are implemented on dmcub.
2083 	*/
2084 	if (dmcu) {
2085 		if (!dmcu_load_iram(dmcu, params))
2086 			return -EINVAL;
2087 	} else if (adev->dm.dc->ctx->dmub_srv) {
2088 		struct dc_link *edp_links[MAX_NUM_EDP];
2089 		int edp_num;
2090 
2091 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2092 		for (i = 0; i < edp_num; i++) {
2093 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2094 				return -EINVAL;
2095 		}
2096 	}
2097 
2098 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2099 }
2100 
2101 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2102 {
2103 	struct amdgpu_dm_connector *aconnector;
2104 	struct drm_connector *connector;
2105 	struct drm_connector_list_iter iter;
2106 	struct drm_dp_mst_topology_mgr *mgr;
2107 	int ret;
2108 	bool need_hotplug = false;
2109 
2110 	drm_connector_list_iter_begin(dev, &iter);
2111 	drm_for_each_connector_iter(connector, &iter) {
2112 		aconnector = to_amdgpu_dm_connector(connector);
2113 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2114 		    aconnector->mst_port)
2115 			continue;
2116 
2117 		mgr = &aconnector->mst_mgr;
2118 
2119 		if (suspend) {
2120 			drm_dp_mst_topology_mgr_suspend(mgr);
2121 		} else {
2122 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2123 			if (ret < 0) {
2124 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2125 				need_hotplug = true;
2126 			}
2127 		}
2128 	}
2129 	drm_connector_list_iter_end(&iter);
2130 
2131 	if (need_hotplug)
2132 		drm_kms_helper_hotplug_event(dev);
2133 }
2134 
2135 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2136 {
2137 	struct smu_context *smu = &adev->smu;
2138 	int ret = 0;
2139 
2140 	if (!is_support_sw_smu(adev))
2141 		return 0;
2142 
2143 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2144 	 * on window driver dc implementation.
2145 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2146 	 * should be passed to smu during boot up and resume from s3.
2147 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2148 	 * dcn20_resource_construct
2149 	 * then call pplib functions below to pass the settings to smu:
2150 	 * smu_set_watermarks_for_clock_ranges
2151 	 * smu_set_watermarks_table
2152 	 * navi10_set_watermarks_table
2153 	 * smu_write_watermarks_table
2154 	 *
2155 	 * For Renoir, clock settings of dcn watermark are also fixed values.
2156 	 * dc has implemented different flow for window driver:
2157 	 * dc_hardware_init / dc_set_power_state
2158 	 * dcn10_init_hw
2159 	 * notify_wm_ranges
2160 	 * set_wm_ranges
2161 	 * -- Linux
2162 	 * smu_set_watermarks_for_clock_ranges
2163 	 * renoir_set_watermarks_table
2164 	 * smu_write_watermarks_table
2165 	 *
2166 	 * For Linux,
2167 	 * dc_hardware_init -> amdgpu_dm_init
2168 	 * dc_set_power_state --> dm_resume
2169 	 *
2170 	 * therefore, this function apply to navi10/12/14 but not Renoir
2171 	 * *
2172 	 */
2173 	switch (adev->ip_versions[DCE_HWIP][0]) {
2174 	case IP_VERSION(2, 0, 2):
2175 	case IP_VERSION(2, 0, 0):
2176 		break;
2177 	default:
2178 		return 0;
2179 	}
2180 
2181 	ret = smu_write_watermarks_table(smu);
2182 	if (ret) {
2183 		DRM_ERROR("Failed to update WMTABLE!\n");
2184 		return ret;
2185 	}
2186 
2187 	return 0;
2188 }
2189 
2190 /**
2191  * dm_hw_init() - Initialize DC device
2192  * @handle: The base driver device containing the amdgpu_dm device.
2193  *
2194  * Initialize the &struct amdgpu_display_manager device. This involves calling
2195  * the initializers of each DM component, then populating the struct with them.
2196  *
2197  * Although the function implies hardware initialization, both hardware and
2198  * software are initialized here. Splitting them out to their relevant init
2199  * hooks is a future TODO item.
2200  *
2201  * Some notable things that are initialized here:
2202  *
2203  * - Display Core, both software and hardware
2204  * - DC modules that we need (freesync and color management)
2205  * - DRM software states
2206  * - Interrupt sources and handlers
2207  * - Vblank support
2208  * - Debug FS entries, if enabled
2209  */
2210 static int dm_hw_init(void *handle)
2211 {
2212 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2213 	/* Create DAL display manager */
2214 	amdgpu_dm_init(adev);
2215 	amdgpu_dm_hpd_init(adev);
2216 
2217 	return 0;
2218 }
2219 
2220 /**
2221  * dm_hw_fini() - Teardown DC device
2222  * @handle: The base driver device containing the amdgpu_dm device.
2223  *
2224  * Teardown components within &struct amdgpu_display_manager that require
2225  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2226  * were loaded. Also flush IRQ workqueues and disable them.
2227  */
2228 static int dm_hw_fini(void *handle)
2229 {
2230 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2231 
2232 	amdgpu_dm_hpd_fini(adev);
2233 
2234 	amdgpu_dm_irq_fini(adev);
2235 	amdgpu_dm_fini(adev);
2236 	return 0;
2237 }
2238 
2239 
2240 static int dm_enable_vblank(struct drm_crtc *crtc);
2241 static void dm_disable_vblank(struct drm_crtc *crtc);
2242 
2243 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2244 				 struct dc_state *state, bool enable)
2245 {
2246 	enum dc_irq_source irq_source;
2247 	struct amdgpu_crtc *acrtc;
2248 	int rc = -EBUSY;
2249 	int i = 0;
2250 
2251 	for (i = 0; i < state->stream_count; i++) {
2252 		acrtc = get_crtc_by_otg_inst(
2253 				adev, state->stream_status[i].primary_otg_inst);
2254 
2255 		if (acrtc && state->stream_status[i].plane_count != 0) {
2256 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2257 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2258 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2259 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2260 			if (rc)
2261 				DRM_WARN("Failed to %s pflip interrupts\n",
2262 					 enable ? "enable" : "disable");
2263 
2264 			if (enable) {
2265 				rc = dm_enable_vblank(&acrtc->base);
2266 				if (rc)
2267 					DRM_WARN("Failed to enable vblank interrupts\n");
2268 			} else {
2269 				dm_disable_vblank(&acrtc->base);
2270 			}
2271 
2272 		}
2273 	}
2274 
2275 }
2276 
2277 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2278 {
2279 	struct dc_state *context = NULL;
2280 	enum dc_status res = DC_ERROR_UNEXPECTED;
2281 	int i;
2282 	struct dc_stream_state *del_streams[MAX_PIPES];
2283 	int del_streams_count = 0;
2284 
2285 	memset(del_streams, 0, sizeof(del_streams));
2286 
2287 	context = dc_create_state(dc);
2288 	if (context == NULL)
2289 		goto context_alloc_fail;
2290 
2291 	dc_resource_state_copy_construct_current(dc, context);
2292 
2293 	/* First remove from context all streams */
2294 	for (i = 0; i < context->stream_count; i++) {
2295 		struct dc_stream_state *stream = context->streams[i];
2296 
2297 		del_streams[del_streams_count++] = stream;
2298 	}
2299 
2300 	/* Remove all planes for removed streams and then remove the streams */
2301 	for (i = 0; i < del_streams_count; i++) {
2302 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2303 			res = DC_FAIL_DETACH_SURFACES;
2304 			goto fail;
2305 		}
2306 
2307 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2308 		if (res != DC_OK)
2309 			goto fail;
2310 	}
2311 
2312 
2313 	res = dc_validate_global_state(dc, context, false);
2314 
2315 	if (res != DC_OK) {
2316 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2317 		goto fail;
2318 	}
2319 
2320 	res = dc_commit_state(dc, context);
2321 
2322 fail:
2323 	dc_release_state(context);
2324 
2325 context_alloc_fail:
2326 	return res;
2327 }
2328 
2329 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2330 {
2331 	int i;
2332 
2333 	if (dm->hpd_rx_offload_wq) {
2334 		for (i = 0; i < dm->dc->caps.max_links; i++)
2335 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2336 	}
2337 }
2338 
2339 static int dm_suspend(void *handle)
2340 {
2341 	struct amdgpu_device *adev = handle;
2342 	struct amdgpu_display_manager *dm = &adev->dm;
2343 	int ret = 0;
2344 
2345 	if (amdgpu_in_reset(adev)) {
2346 		mutex_lock(&dm->dc_lock);
2347 
2348 #if defined(CONFIG_DRM_AMD_DC_DCN)
2349 		dc_allow_idle_optimizations(adev->dm.dc, false);
2350 #endif
2351 
2352 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2353 
2354 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2355 
2356 		amdgpu_dm_commit_zero_streams(dm->dc);
2357 
2358 		amdgpu_dm_irq_suspend(adev);
2359 
2360 		hpd_rx_irq_work_suspend(dm);
2361 
2362 		return ret;
2363 	}
2364 
2365 	WARN_ON(adev->dm.cached_state);
2366 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2367 
2368 	s3_handle_mst(adev_to_drm(adev), true);
2369 
2370 	amdgpu_dm_irq_suspend(adev);
2371 
2372 	hpd_rx_irq_work_suspend(dm);
2373 
2374 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2375 
2376 	return 0;
2377 }
2378 
2379 static struct amdgpu_dm_connector *
2380 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2381 					     struct drm_crtc *crtc)
2382 {
2383 	uint32_t i;
2384 	struct drm_connector_state *new_con_state;
2385 	struct drm_connector *connector;
2386 	struct drm_crtc *crtc_from_state;
2387 
2388 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2389 		crtc_from_state = new_con_state->crtc;
2390 
2391 		if (crtc_from_state == crtc)
2392 			return to_amdgpu_dm_connector(connector);
2393 	}
2394 
2395 	return NULL;
2396 }
2397 
2398 static void emulated_link_detect(struct dc_link *link)
2399 {
2400 	struct dc_sink_init_data sink_init_data = { 0 };
2401 	struct display_sink_capability sink_caps = { 0 };
2402 	enum dc_edid_status edid_status;
2403 	struct dc_context *dc_ctx = link->ctx;
2404 	struct dc_sink *sink = NULL;
2405 	struct dc_sink *prev_sink = NULL;
2406 
2407 	link->type = dc_connection_none;
2408 	prev_sink = link->local_sink;
2409 
2410 	if (prev_sink)
2411 		dc_sink_release(prev_sink);
2412 
2413 	switch (link->connector_signal) {
2414 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2415 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2416 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2417 		break;
2418 	}
2419 
2420 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2421 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2422 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2423 		break;
2424 	}
2425 
2426 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2427 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2428 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2429 		break;
2430 	}
2431 
2432 	case SIGNAL_TYPE_LVDS: {
2433 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2434 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2435 		break;
2436 	}
2437 
2438 	case SIGNAL_TYPE_EDP: {
2439 		sink_caps.transaction_type =
2440 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2441 		sink_caps.signal = SIGNAL_TYPE_EDP;
2442 		break;
2443 	}
2444 
2445 	case SIGNAL_TYPE_DISPLAY_PORT: {
2446 		sink_caps.transaction_type =
2447 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2448 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2449 		break;
2450 	}
2451 
2452 	default:
2453 		DC_ERROR("Invalid connector type! signal:%d\n",
2454 			link->connector_signal);
2455 		return;
2456 	}
2457 
2458 	sink_init_data.link = link;
2459 	sink_init_data.sink_signal = sink_caps.signal;
2460 
2461 	sink = dc_sink_create(&sink_init_data);
2462 	if (!sink) {
2463 		DC_ERROR("Failed to create sink!\n");
2464 		return;
2465 	}
2466 
2467 	/* dc_sink_create returns a new reference */
2468 	link->local_sink = sink;
2469 
2470 	edid_status = dm_helpers_read_local_edid(
2471 			link->ctx,
2472 			link,
2473 			sink);
2474 
2475 	if (edid_status != EDID_OK)
2476 		DC_ERROR("Failed to read EDID");
2477 
2478 }
2479 
2480 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2481 				     struct amdgpu_display_manager *dm)
2482 {
2483 	struct {
2484 		struct dc_surface_update surface_updates[MAX_SURFACES];
2485 		struct dc_plane_info plane_infos[MAX_SURFACES];
2486 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2487 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2488 		struct dc_stream_update stream_update;
2489 	} * bundle;
2490 	int k, m;
2491 
2492 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2493 
2494 	if (!bundle) {
2495 		dm_error("Failed to allocate update bundle\n");
2496 		goto cleanup;
2497 	}
2498 
2499 	for (k = 0; k < dc_state->stream_count; k++) {
2500 		bundle->stream_update.stream = dc_state->streams[k];
2501 
2502 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2503 			bundle->surface_updates[m].surface =
2504 				dc_state->stream_status->plane_states[m];
2505 			bundle->surface_updates[m].surface->force_full_update =
2506 				true;
2507 		}
2508 		dc_commit_updates_for_stream(
2509 			dm->dc, bundle->surface_updates,
2510 			dc_state->stream_status->plane_count,
2511 			dc_state->streams[k], &bundle->stream_update, dc_state);
2512 	}
2513 
2514 cleanup:
2515 	kfree(bundle);
2516 
2517 	return;
2518 }
2519 
2520 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2521 {
2522 	struct dc_stream_state *stream_state;
2523 	struct amdgpu_dm_connector *aconnector = link->priv;
2524 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2525 	struct dc_stream_update stream_update;
2526 	bool dpms_off = true;
2527 
2528 	memset(&stream_update, 0, sizeof(stream_update));
2529 	stream_update.dpms_off = &dpms_off;
2530 
2531 	mutex_lock(&adev->dm.dc_lock);
2532 	stream_state = dc_stream_find_from_link(link);
2533 
2534 	if (stream_state == NULL) {
2535 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2536 		mutex_unlock(&adev->dm.dc_lock);
2537 		return;
2538 	}
2539 
2540 	stream_update.stream = stream_state;
2541 	acrtc_state->force_dpms_off = true;
2542 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2543 				     stream_state, &stream_update,
2544 				     stream_state->ctx->dc->current_state);
2545 	mutex_unlock(&adev->dm.dc_lock);
2546 }
2547 
2548 static int dm_resume(void *handle)
2549 {
2550 	struct amdgpu_device *adev = handle;
2551 	struct drm_device *ddev = adev_to_drm(adev);
2552 	struct amdgpu_display_manager *dm = &adev->dm;
2553 	struct amdgpu_dm_connector *aconnector;
2554 	struct drm_connector *connector;
2555 	struct drm_connector_list_iter iter;
2556 	struct drm_crtc *crtc;
2557 	struct drm_crtc_state *new_crtc_state;
2558 	struct dm_crtc_state *dm_new_crtc_state;
2559 	struct drm_plane *plane;
2560 	struct drm_plane_state *new_plane_state;
2561 	struct dm_plane_state *dm_new_plane_state;
2562 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2563 	enum dc_connection_type new_connection_type = dc_connection_none;
2564 	struct dc_state *dc_state;
2565 	int i, r, j;
2566 
2567 	if (amdgpu_in_reset(adev)) {
2568 		dc_state = dm->cached_dc_state;
2569 
2570 		/*
2571 		 * The dc->current_state is backed up into dm->cached_dc_state
2572 		 * before we commit 0 streams.
2573 		 *
2574 		 * DC will clear link encoder assignments on the real state
2575 		 * but the changes won't propagate over to the copy we made
2576 		 * before the 0 streams commit.
2577 		 *
2578 		 * DC expects that link encoder assignments are *not* valid
2579 		 * when committing a state, so as a workaround it needs to be
2580 		 * cleared here.
2581 		 */
2582 		link_enc_cfg_init(dm->dc, dc_state);
2583 
2584 		if (dc_enable_dmub_notifications(adev->dm.dc))
2585 			amdgpu_dm_outbox_init(adev);
2586 
2587 		r = dm_dmub_hw_init(adev);
2588 		if (r)
2589 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2590 
2591 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2592 		dc_resume(dm->dc);
2593 
2594 		amdgpu_dm_irq_resume_early(adev);
2595 
2596 		for (i = 0; i < dc_state->stream_count; i++) {
2597 			dc_state->streams[i]->mode_changed = true;
2598 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2599 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2600 					= 0xffffffff;
2601 			}
2602 		}
2603 #if defined(CONFIG_DRM_AMD_DC_DCN)
2604 		/*
2605 		 * Resource allocation happens for link encoders for newer ASIC in
2606 		 * dc_validate_global_state, so we need to revalidate it.
2607 		 *
2608 		 * This shouldn't fail (it passed once before), so warn if it does.
2609 		 */
2610 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2611 #endif
2612 
2613 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2614 
2615 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2616 
2617 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2618 
2619 		dc_release_state(dm->cached_dc_state);
2620 		dm->cached_dc_state = NULL;
2621 
2622 		amdgpu_dm_irq_resume_late(adev);
2623 
2624 		mutex_unlock(&dm->dc_lock);
2625 
2626 		return 0;
2627 	}
2628 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2629 	dc_release_state(dm_state->context);
2630 	dm_state->context = dc_create_state(dm->dc);
2631 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2632 	dc_resource_state_construct(dm->dc, dm_state->context);
2633 
2634 	/* Re-enable outbox interrupts for DPIA. */
2635 	if (dc_enable_dmub_notifications(adev->dm.dc))
2636 		amdgpu_dm_outbox_init(adev);
2637 
2638 	/* Before powering on DC we need to re-initialize DMUB. */
2639 	r = dm_dmub_hw_init(adev);
2640 	if (r)
2641 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2642 
2643 	/* power on hardware */
2644 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2645 
2646 	/* program HPD filter */
2647 	dc_resume(dm->dc);
2648 
2649 	/*
2650 	 * early enable HPD Rx IRQ, should be done before set mode as short
2651 	 * pulse interrupts are used for MST
2652 	 */
2653 	amdgpu_dm_irq_resume_early(adev);
2654 
2655 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2656 	s3_handle_mst(ddev, false);
2657 
2658 	/* Do detection*/
2659 	drm_connector_list_iter_begin(ddev, &iter);
2660 	drm_for_each_connector_iter(connector, &iter) {
2661 		aconnector = to_amdgpu_dm_connector(connector);
2662 
2663 		/*
2664 		 * this is the case when traversing through already created
2665 		 * MST connectors, should be skipped
2666 		 */
2667 		if (aconnector->mst_port)
2668 			continue;
2669 
2670 		mutex_lock(&aconnector->hpd_lock);
2671 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2672 			DRM_ERROR("KMS: Failed to detect connector\n");
2673 
2674 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2675 			emulated_link_detect(aconnector->dc_link);
2676 		else
2677 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2678 
2679 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2680 			aconnector->fake_enable = false;
2681 
2682 		if (aconnector->dc_sink)
2683 			dc_sink_release(aconnector->dc_sink);
2684 		aconnector->dc_sink = NULL;
2685 		amdgpu_dm_update_connector_after_detect(aconnector);
2686 		mutex_unlock(&aconnector->hpd_lock);
2687 	}
2688 	drm_connector_list_iter_end(&iter);
2689 
2690 	/* Force mode set in atomic commit */
2691 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2692 		new_crtc_state->active_changed = true;
2693 
2694 	/*
2695 	 * atomic_check is expected to create the dc states. We need to release
2696 	 * them here, since they were duplicated as part of the suspend
2697 	 * procedure.
2698 	 */
2699 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2700 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2701 		if (dm_new_crtc_state->stream) {
2702 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2703 			dc_stream_release(dm_new_crtc_state->stream);
2704 			dm_new_crtc_state->stream = NULL;
2705 		}
2706 	}
2707 
2708 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2709 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2710 		if (dm_new_plane_state->dc_state) {
2711 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2712 			dc_plane_state_release(dm_new_plane_state->dc_state);
2713 			dm_new_plane_state->dc_state = NULL;
2714 		}
2715 	}
2716 
2717 	drm_atomic_helper_resume(ddev, dm->cached_state);
2718 
2719 	dm->cached_state = NULL;
2720 
2721 	amdgpu_dm_irq_resume_late(adev);
2722 
2723 	amdgpu_dm_smu_write_watermarks_table(adev);
2724 
2725 	return 0;
2726 }
2727 
2728 /**
2729  * DOC: DM Lifecycle
2730  *
2731  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2732  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2733  * the base driver's device list to be initialized and torn down accordingly.
2734  *
2735  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2736  */
2737 
2738 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2739 	.name = "dm",
2740 	.early_init = dm_early_init,
2741 	.late_init = dm_late_init,
2742 	.sw_init = dm_sw_init,
2743 	.sw_fini = dm_sw_fini,
2744 	.early_fini = amdgpu_dm_early_fini,
2745 	.hw_init = dm_hw_init,
2746 	.hw_fini = dm_hw_fini,
2747 	.suspend = dm_suspend,
2748 	.resume = dm_resume,
2749 	.is_idle = dm_is_idle,
2750 	.wait_for_idle = dm_wait_for_idle,
2751 	.check_soft_reset = dm_check_soft_reset,
2752 	.soft_reset = dm_soft_reset,
2753 	.set_clockgating_state = dm_set_clockgating_state,
2754 	.set_powergating_state = dm_set_powergating_state,
2755 };
2756 
2757 const struct amdgpu_ip_block_version dm_ip_block =
2758 {
2759 	.type = AMD_IP_BLOCK_TYPE_DCE,
2760 	.major = 1,
2761 	.minor = 0,
2762 	.rev = 0,
2763 	.funcs = &amdgpu_dm_funcs,
2764 };
2765 
2766 
2767 /**
2768  * DOC: atomic
2769  *
2770  * *WIP*
2771  */
2772 
2773 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2774 	.fb_create = amdgpu_display_user_framebuffer_create,
2775 	.get_format_info = amd_get_format_info,
2776 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2777 	.atomic_check = amdgpu_dm_atomic_check,
2778 	.atomic_commit = drm_atomic_helper_commit,
2779 };
2780 
2781 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2782 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2783 };
2784 
2785 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2786 {
2787 	u32 max_cll, min_cll, max, min, q, r;
2788 	struct amdgpu_dm_backlight_caps *caps;
2789 	struct amdgpu_display_manager *dm;
2790 	struct drm_connector *conn_base;
2791 	struct amdgpu_device *adev;
2792 	struct dc_link *link = NULL;
2793 	static const u8 pre_computed_values[] = {
2794 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2795 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2796 	int i;
2797 
2798 	if (!aconnector || !aconnector->dc_link)
2799 		return;
2800 
2801 	link = aconnector->dc_link;
2802 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2803 		return;
2804 
2805 	conn_base = &aconnector->base;
2806 	adev = drm_to_adev(conn_base->dev);
2807 	dm = &adev->dm;
2808 	for (i = 0; i < dm->num_of_edps; i++) {
2809 		if (link == dm->backlight_link[i])
2810 			break;
2811 	}
2812 	if (i >= dm->num_of_edps)
2813 		return;
2814 	caps = &dm->backlight_caps[i];
2815 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2816 	caps->aux_support = false;
2817 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2818 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2819 
2820 	if (caps->ext_caps->bits.oled == 1 /*||
2821 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2822 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2823 		caps->aux_support = true;
2824 
2825 	if (amdgpu_backlight == 0)
2826 		caps->aux_support = false;
2827 	else if (amdgpu_backlight == 1)
2828 		caps->aux_support = true;
2829 
2830 	/* From the specification (CTA-861-G), for calculating the maximum
2831 	 * luminance we need to use:
2832 	 *	Luminance = 50*2**(CV/32)
2833 	 * Where CV is a one-byte value.
2834 	 * For calculating this expression we may need float point precision;
2835 	 * to avoid this complexity level, we take advantage that CV is divided
2836 	 * by a constant. From the Euclids division algorithm, we know that CV
2837 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2838 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2839 	 * need to pre-compute the value of r/32. For pre-computing the values
2840 	 * We just used the following Ruby line:
2841 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2842 	 * The results of the above expressions can be verified at
2843 	 * pre_computed_values.
2844 	 */
2845 	q = max_cll >> 5;
2846 	r = max_cll % 32;
2847 	max = (1 << q) * pre_computed_values[r];
2848 
2849 	// min luminance: maxLum * (CV/255)^2 / 100
2850 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2851 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2852 
2853 	caps->aux_max_input_signal = max;
2854 	caps->aux_min_input_signal = min;
2855 }
2856 
2857 void amdgpu_dm_update_connector_after_detect(
2858 		struct amdgpu_dm_connector *aconnector)
2859 {
2860 	struct drm_connector *connector = &aconnector->base;
2861 	struct drm_device *dev = connector->dev;
2862 	struct dc_sink *sink;
2863 
2864 	/* MST handled by drm_mst framework */
2865 	if (aconnector->mst_mgr.mst_state == true)
2866 		return;
2867 
2868 	sink = aconnector->dc_link->local_sink;
2869 	if (sink)
2870 		dc_sink_retain(sink);
2871 
2872 	/*
2873 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2874 	 * the connector sink is set to either fake or physical sink depends on link status.
2875 	 * Skip if already done during boot.
2876 	 */
2877 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2878 			&& aconnector->dc_em_sink) {
2879 
2880 		/*
2881 		 * For S3 resume with headless use eml_sink to fake stream
2882 		 * because on resume connector->sink is set to NULL
2883 		 */
2884 		mutex_lock(&dev->mode_config.mutex);
2885 
2886 		if (sink) {
2887 			if (aconnector->dc_sink) {
2888 				amdgpu_dm_update_freesync_caps(connector, NULL);
2889 				/*
2890 				 * retain and release below are used to
2891 				 * bump up refcount for sink because the link doesn't point
2892 				 * to it anymore after disconnect, so on next crtc to connector
2893 				 * reshuffle by UMD we will get into unwanted dc_sink release
2894 				 */
2895 				dc_sink_release(aconnector->dc_sink);
2896 			}
2897 			aconnector->dc_sink = sink;
2898 			dc_sink_retain(aconnector->dc_sink);
2899 			amdgpu_dm_update_freesync_caps(connector,
2900 					aconnector->edid);
2901 		} else {
2902 			amdgpu_dm_update_freesync_caps(connector, NULL);
2903 			if (!aconnector->dc_sink) {
2904 				aconnector->dc_sink = aconnector->dc_em_sink;
2905 				dc_sink_retain(aconnector->dc_sink);
2906 			}
2907 		}
2908 
2909 		mutex_unlock(&dev->mode_config.mutex);
2910 
2911 		if (sink)
2912 			dc_sink_release(sink);
2913 		return;
2914 	}
2915 
2916 	/*
2917 	 * TODO: temporary guard to look for proper fix
2918 	 * if this sink is MST sink, we should not do anything
2919 	 */
2920 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2921 		dc_sink_release(sink);
2922 		return;
2923 	}
2924 
2925 	if (aconnector->dc_sink == sink) {
2926 		/*
2927 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2928 		 * Do nothing!!
2929 		 */
2930 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2931 				aconnector->connector_id);
2932 		if (sink)
2933 			dc_sink_release(sink);
2934 		return;
2935 	}
2936 
2937 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2938 		aconnector->connector_id, aconnector->dc_sink, sink);
2939 
2940 	mutex_lock(&dev->mode_config.mutex);
2941 
2942 	/*
2943 	 * 1. Update status of the drm connector
2944 	 * 2. Send an event and let userspace tell us what to do
2945 	 */
2946 	if (sink) {
2947 		/*
2948 		 * TODO: check if we still need the S3 mode update workaround.
2949 		 * If yes, put it here.
2950 		 */
2951 		if (aconnector->dc_sink) {
2952 			amdgpu_dm_update_freesync_caps(connector, NULL);
2953 			dc_sink_release(aconnector->dc_sink);
2954 		}
2955 
2956 		aconnector->dc_sink = sink;
2957 		dc_sink_retain(aconnector->dc_sink);
2958 		if (sink->dc_edid.length == 0) {
2959 			aconnector->edid = NULL;
2960 			if (aconnector->dc_link->aux_mode) {
2961 				drm_dp_cec_unset_edid(
2962 					&aconnector->dm_dp_aux.aux);
2963 			}
2964 		} else {
2965 			aconnector->edid =
2966 				(struct edid *)sink->dc_edid.raw_edid;
2967 
2968 			drm_connector_update_edid_property(connector,
2969 							   aconnector->edid);
2970 			if (aconnector->dc_link->aux_mode)
2971 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2972 						    aconnector->edid);
2973 		}
2974 
2975 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2976 		update_connector_ext_caps(aconnector);
2977 	} else {
2978 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2979 		amdgpu_dm_update_freesync_caps(connector, NULL);
2980 		drm_connector_update_edid_property(connector, NULL);
2981 		aconnector->num_modes = 0;
2982 		dc_sink_release(aconnector->dc_sink);
2983 		aconnector->dc_sink = NULL;
2984 		aconnector->edid = NULL;
2985 #ifdef CONFIG_DRM_AMD_DC_HDCP
2986 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2987 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2988 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2989 #endif
2990 	}
2991 
2992 	mutex_unlock(&dev->mode_config.mutex);
2993 
2994 	update_subconnector_property(aconnector);
2995 
2996 	if (sink)
2997 		dc_sink_release(sink);
2998 }
2999 
3000 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3001 {
3002 	struct drm_connector *connector = &aconnector->base;
3003 	struct drm_device *dev = connector->dev;
3004 	enum dc_connection_type new_connection_type = dc_connection_none;
3005 	struct amdgpu_device *adev = drm_to_adev(dev);
3006 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3007 	struct dm_crtc_state *dm_crtc_state = NULL;
3008 
3009 	if (adev->dm.disable_hpd_irq)
3010 		return;
3011 
3012 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3013 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3014 					dm_con_state->base.state,
3015 					dm_con_state->base.crtc));
3016 	/*
3017 	 * In case of failure or MST no need to update connector status or notify the OS
3018 	 * since (for MST case) MST does this in its own context.
3019 	 */
3020 	mutex_lock(&aconnector->hpd_lock);
3021 
3022 #ifdef CONFIG_DRM_AMD_DC_HDCP
3023 	if (adev->dm.hdcp_workqueue) {
3024 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3025 		dm_con_state->update_hdcp = true;
3026 	}
3027 #endif
3028 	if (aconnector->fake_enable)
3029 		aconnector->fake_enable = false;
3030 
3031 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3032 		DRM_ERROR("KMS: Failed to detect connector\n");
3033 
3034 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3035 		emulated_link_detect(aconnector->dc_link);
3036 
3037 		drm_modeset_lock_all(dev);
3038 		dm_restore_drm_connector_state(dev, connector);
3039 		drm_modeset_unlock_all(dev);
3040 
3041 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3042 			drm_kms_helper_hotplug_event(dev);
3043 
3044 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3045 		if (new_connection_type == dc_connection_none &&
3046 		    aconnector->dc_link->type == dc_connection_none &&
3047 		    dm_crtc_state)
3048 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3049 
3050 		amdgpu_dm_update_connector_after_detect(aconnector);
3051 
3052 		drm_modeset_lock_all(dev);
3053 		dm_restore_drm_connector_state(dev, connector);
3054 		drm_modeset_unlock_all(dev);
3055 
3056 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3057 			drm_kms_helper_hotplug_event(dev);
3058 	}
3059 	mutex_unlock(&aconnector->hpd_lock);
3060 
3061 }
3062 
3063 static void handle_hpd_irq(void *param)
3064 {
3065 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3066 
3067 	handle_hpd_irq_helper(aconnector);
3068 
3069 }
3070 
3071 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3072 {
3073 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3074 	uint8_t dret;
3075 	bool new_irq_handled = false;
3076 	int dpcd_addr;
3077 	int dpcd_bytes_to_read;
3078 
3079 	const int max_process_count = 30;
3080 	int process_count = 0;
3081 
3082 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3083 
3084 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3085 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3086 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3087 		dpcd_addr = DP_SINK_COUNT;
3088 	} else {
3089 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3090 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3091 		dpcd_addr = DP_SINK_COUNT_ESI;
3092 	}
3093 
3094 	dret = drm_dp_dpcd_read(
3095 		&aconnector->dm_dp_aux.aux,
3096 		dpcd_addr,
3097 		esi,
3098 		dpcd_bytes_to_read);
3099 
3100 	while (dret == dpcd_bytes_to_read &&
3101 		process_count < max_process_count) {
3102 		uint8_t retry;
3103 		dret = 0;
3104 
3105 		process_count++;
3106 
3107 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3108 		/* handle HPD short pulse irq */
3109 		if (aconnector->mst_mgr.mst_state)
3110 			drm_dp_mst_hpd_irq(
3111 				&aconnector->mst_mgr,
3112 				esi,
3113 				&new_irq_handled);
3114 
3115 		if (new_irq_handled) {
3116 			/* ACK at DPCD to notify down stream */
3117 			const int ack_dpcd_bytes_to_write =
3118 				dpcd_bytes_to_read - 1;
3119 
3120 			for (retry = 0; retry < 3; retry++) {
3121 				uint8_t wret;
3122 
3123 				wret = drm_dp_dpcd_write(
3124 					&aconnector->dm_dp_aux.aux,
3125 					dpcd_addr + 1,
3126 					&esi[1],
3127 					ack_dpcd_bytes_to_write);
3128 				if (wret == ack_dpcd_bytes_to_write)
3129 					break;
3130 			}
3131 
3132 			/* check if there is new irq to be handled */
3133 			dret = drm_dp_dpcd_read(
3134 				&aconnector->dm_dp_aux.aux,
3135 				dpcd_addr,
3136 				esi,
3137 				dpcd_bytes_to_read);
3138 
3139 			new_irq_handled = false;
3140 		} else {
3141 			break;
3142 		}
3143 	}
3144 
3145 	if (process_count == max_process_count)
3146 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3147 }
3148 
3149 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3150 							union hpd_irq_data hpd_irq_data)
3151 {
3152 	struct hpd_rx_irq_offload_work *offload_work =
3153 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3154 
3155 	if (!offload_work) {
3156 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3157 		return;
3158 	}
3159 
3160 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3161 	offload_work->data = hpd_irq_data;
3162 	offload_work->offload_wq = offload_wq;
3163 
3164 	queue_work(offload_wq->wq, &offload_work->work);
3165 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3166 }
3167 
3168 static void handle_hpd_rx_irq(void *param)
3169 {
3170 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3171 	struct drm_connector *connector = &aconnector->base;
3172 	struct drm_device *dev = connector->dev;
3173 	struct dc_link *dc_link = aconnector->dc_link;
3174 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3175 	bool result = false;
3176 	enum dc_connection_type new_connection_type = dc_connection_none;
3177 	struct amdgpu_device *adev = drm_to_adev(dev);
3178 	union hpd_irq_data hpd_irq_data;
3179 	bool link_loss = false;
3180 	bool has_left_work = false;
3181 	int idx = aconnector->base.index;
3182 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3183 
3184 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3185 
3186 	if (adev->dm.disable_hpd_irq)
3187 		return;
3188 
3189 	/*
3190 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3191 	 * conflict, after implement i2c helper, this mutex should be
3192 	 * retired.
3193 	 */
3194 	mutex_lock(&aconnector->hpd_lock);
3195 
3196 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3197 						&link_loss, true, &has_left_work);
3198 
3199 	if (!has_left_work)
3200 		goto out;
3201 
3202 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3203 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3204 		goto out;
3205 	}
3206 
3207 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3208 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3209 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3210 			dm_handle_mst_sideband_msg(aconnector);
3211 			goto out;
3212 		}
3213 
3214 		if (link_loss) {
3215 			bool skip = false;
3216 
3217 			spin_lock(&offload_wq->offload_lock);
3218 			skip = offload_wq->is_handling_link_loss;
3219 
3220 			if (!skip)
3221 				offload_wq->is_handling_link_loss = true;
3222 
3223 			spin_unlock(&offload_wq->offload_lock);
3224 
3225 			if (!skip)
3226 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3227 
3228 			goto out;
3229 		}
3230 	}
3231 
3232 out:
3233 	if (result && !is_mst_root_connector) {
3234 		/* Downstream Port status changed. */
3235 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3236 			DRM_ERROR("KMS: Failed to detect connector\n");
3237 
3238 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3239 			emulated_link_detect(dc_link);
3240 
3241 			if (aconnector->fake_enable)
3242 				aconnector->fake_enable = false;
3243 
3244 			amdgpu_dm_update_connector_after_detect(aconnector);
3245 
3246 
3247 			drm_modeset_lock_all(dev);
3248 			dm_restore_drm_connector_state(dev, connector);
3249 			drm_modeset_unlock_all(dev);
3250 
3251 			drm_kms_helper_hotplug_event(dev);
3252 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3253 
3254 			if (aconnector->fake_enable)
3255 				aconnector->fake_enable = false;
3256 
3257 			amdgpu_dm_update_connector_after_detect(aconnector);
3258 
3259 
3260 			drm_modeset_lock_all(dev);
3261 			dm_restore_drm_connector_state(dev, connector);
3262 			drm_modeset_unlock_all(dev);
3263 
3264 			drm_kms_helper_hotplug_event(dev);
3265 		}
3266 	}
3267 #ifdef CONFIG_DRM_AMD_DC_HDCP
3268 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3269 		if (adev->dm.hdcp_workqueue)
3270 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3271 	}
3272 #endif
3273 
3274 	if (dc_link->type != dc_connection_mst_branch)
3275 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3276 
3277 	mutex_unlock(&aconnector->hpd_lock);
3278 }
3279 
3280 static void register_hpd_handlers(struct amdgpu_device *adev)
3281 {
3282 	struct drm_device *dev = adev_to_drm(adev);
3283 	struct drm_connector *connector;
3284 	struct amdgpu_dm_connector *aconnector;
3285 	const struct dc_link *dc_link;
3286 	struct dc_interrupt_params int_params = {0};
3287 
3288 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3289 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3290 
3291 	list_for_each_entry(connector,
3292 			&dev->mode_config.connector_list, head)	{
3293 
3294 		aconnector = to_amdgpu_dm_connector(connector);
3295 		dc_link = aconnector->dc_link;
3296 
3297 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3298 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3299 			int_params.irq_source = dc_link->irq_source_hpd;
3300 
3301 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3302 					handle_hpd_irq,
3303 					(void *) aconnector);
3304 		}
3305 
3306 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3307 
3308 			/* Also register for DP short pulse (hpd_rx). */
3309 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3310 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3311 
3312 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3313 					handle_hpd_rx_irq,
3314 					(void *) aconnector);
3315 
3316 			if (adev->dm.hpd_rx_offload_wq)
3317 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3318 					aconnector;
3319 		}
3320 	}
3321 }
3322 
3323 #if defined(CONFIG_DRM_AMD_DC_SI)
3324 /* Register IRQ sources and initialize IRQ callbacks */
3325 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3326 {
3327 	struct dc *dc = adev->dm.dc;
3328 	struct common_irq_params *c_irq_params;
3329 	struct dc_interrupt_params int_params = {0};
3330 	int r;
3331 	int i;
3332 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3333 
3334 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3335 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3336 
3337 	/*
3338 	 * Actions of amdgpu_irq_add_id():
3339 	 * 1. Register a set() function with base driver.
3340 	 *    Base driver will call set() function to enable/disable an
3341 	 *    interrupt in DC hardware.
3342 	 * 2. Register amdgpu_dm_irq_handler().
3343 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3344 	 *    coming from DC hardware.
3345 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3346 	 *    for acknowledging and handling. */
3347 
3348 	/* Use VBLANK interrupt */
3349 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3350 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
3351 		if (r) {
3352 			DRM_ERROR("Failed to add crtc irq id!\n");
3353 			return r;
3354 		}
3355 
3356 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3357 		int_params.irq_source =
3358 			dc_interrupt_to_irq_source(dc, i+1 , 0);
3359 
3360 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3361 
3362 		c_irq_params->adev = adev;
3363 		c_irq_params->irq_src = int_params.irq_source;
3364 
3365 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3366 				dm_crtc_high_irq, c_irq_params);
3367 	}
3368 
3369 	/* Use GRPH_PFLIP interrupt */
3370 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3371 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3372 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3373 		if (r) {
3374 			DRM_ERROR("Failed to add page flip irq id!\n");
3375 			return r;
3376 		}
3377 
3378 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3379 		int_params.irq_source =
3380 			dc_interrupt_to_irq_source(dc, i, 0);
3381 
3382 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3383 
3384 		c_irq_params->adev = adev;
3385 		c_irq_params->irq_src = int_params.irq_source;
3386 
3387 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3388 				dm_pflip_high_irq, c_irq_params);
3389 
3390 	}
3391 
3392 	/* HPD */
3393 	r = amdgpu_irq_add_id(adev, client_id,
3394 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3395 	if (r) {
3396 		DRM_ERROR("Failed to add hpd irq id!\n");
3397 		return r;
3398 	}
3399 
3400 	register_hpd_handlers(adev);
3401 
3402 	return 0;
3403 }
3404 #endif
3405 
3406 /* Register IRQ sources and initialize IRQ callbacks */
3407 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3408 {
3409 	struct dc *dc = adev->dm.dc;
3410 	struct common_irq_params *c_irq_params;
3411 	struct dc_interrupt_params int_params = {0};
3412 	int r;
3413 	int i;
3414 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3415 
3416 	if (adev->family >= AMDGPU_FAMILY_AI)
3417 		client_id = SOC15_IH_CLIENTID_DCE;
3418 
3419 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3420 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3421 
3422 	/*
3423 	 * Actions of amdgpu_irq_add_id():
3424 	 * 1. Register a set() function with base driver.
3425 	 *    Base driver will call set() function to enable/disable an
3426 	 *    interrupt in DC hardware.
3427 	 * 2. Register amdgpu_dm_irq_handler().
3428 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3429 	 *    coming from DC hardware.
3430 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3431 	 *    for acknowledging and handling. */
3432 
3433 	/* Use VBLANK interrupt */
3434 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3435 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3436 		if (r) {
3437 			DRM_ERROR("Failed to add crtc irq id!\n");
3438 			return r;
3439 		}
3440 
3441 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3442 		int_params.irq_source =
3443 			dc_interrupt_to_irq_source(dc, i, 0);
3444 
3445 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3446 
3447 		c_irq_params->adev = adev;
3448 		c_irq_params->irq_src = int_params.irq_source;
3449 
3450 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3451 				dm_crtc_high_irq, c_irq_params);
3452 	}
3453 
3454 	/* Use VUPDATE interrupt */
3455 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3456 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3457 		if (r) {
3458 			DRM_ERROR("Failed to add vupdate irq id!\n");
3459 			return r;
3460 		}
3461 
3462 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3463 		int_params.irq_source =
3464 			dc_interrupt_to_irq_source(dc, i, 0);
3465 
3466 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3467 
3468 		c_irq_params->adev = adev;
3469 		c_irq_params->irq_src = int_params.irq_source;
3470 
3471 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3472 				dm_vupdate_high_irq, c_irq_params);
3473 	}
3474 
3475 	/* Use GRPH_PFLIP interrupt */
3476 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3477 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3478 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3479 		if (r) {
3480 			DRM_ERROR("Failed to add page flip irq id!\n");
3481 			return r;
3482 		}
3483 
3484 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3485 		int_params.irq_source =
3486 			dc_interrupt_to_irq_source(dc, i, 0);
3487 
3488 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3489 
3490 		c_irq_params->adev = adev;
3491 		c_irq_params->irq_src = int_params.irq_source;
3492 
3493 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3494 				dm_pflip_high_irq, c_irq_params);
3495 
3496 	}
3497 
3498 	/* HPD */
3499 	r = amdgpu_irq_add_id(adev, client_id,
3500 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3501 	if (r) {
3502 		DRM_ERROR("Failed to add hpd irq id!\n");
3503 		return r;
3504 	}
3505 
3506 	register_hpd_handlers(adev);
3507 
3508 	return 0;
3509 }
3510 
3511 #if defined(CONFIG_DRM_AMD_DC_DCN)
3512 /* Register IRQ sources and initialize IRQ callbacks */
3513 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3514 {
3515 	struct dc *dc = adev->dm.dc;
3516 	struct common_irq_params *c_irq_params;
3517 	struct dc_interrupt_params int_params = {0};
3518 	int r;
3519 	int i;
3520 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3521 	static const unsigned int vrtl_int_srcid[] = {
3522 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3523 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3524 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3525 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3526 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3527 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3528 	};
3529 #endif
3530 
3531 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3532 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3533 
3534 	/*
3535 	 * Actions of amdgpu_irq_add_id():
3536 	 * 1. Register a set() function with base driver.
3537 	 *    Base driver will call set() function to enable/disable an
3538 	 *    interrupt in DC hardware.
3539 	 * 2. Register amdgpu_dm_irq_handler().
3540 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3541 	 *    coming from DC hardware.
3542 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3543 	 *    for acknowledging and handling.
3544 	 */
3545 
3546 	/* Use VSTARTUP interrupt */
3547 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3548 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3549 			i++) {
3550 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3551 
3552 		if (r) {
3553 			DRM_ERROR("Failed to add crtc irq id!\n");
3554 			return r;
3555 		}
3556 
3557 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3558 		int_params.irq_source =
3559 			dc_interrupt_to_irq_source(dc, i, 0);
3560 
3561 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3562 
3563 		c_irq_params->adev = adev;
3564 		c_irq_params->irq_src = int_params.irq_source;
3565 
3566 		amdgpu_dm_irq_register_interrupt(
3567 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3568 	}
3569 
3570 	/* Use otg vertical line interrupt */
3571 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3572 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3573 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3574 				vrtl_int_srcid[i], &adev->vline0_irq);
3575 
3576 		if (r) {
3577 			DRM_ERROR("Failed to add vline0 irq id!\n");
3578 			return r;
3579 		}
3580 
3581 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3582 		int_params.irq_source =
3583 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3584 
3585 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3586 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3587 			break;
3588 		}
3589 
3590 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3591 					- DC_IRQ_SOURCE_DC1_VLINE0];
3592 
3593 		c_irq_params->adev = adev;
3594 		c_irq_params->irq_src = int_params.irq_source;
3595 
3596 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3597 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3598 	}
3599 #endif
3600 
3601 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3602 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3603 	 * to trigger at end of each vblank, regardless of state of the lock,
3604 	 * matching DCE behaviour.
3605 	 */
3606 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3607 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3608 	     i++) {
3609 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3610 
3611 		if (r) {
3612 			DRM_ERROR("Failed to add vupdate irq id!\n");
3613 			return r;
3614 		}
3615 
3616 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3617 		int_params.irq_source =
3618 			dc_interrupt_to_irq_source(dc, i, 0);
3619 
3620 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3621 
3622 		c_irq_params->adev = adev;
3623 		c_irq_params->irq_src = int_params.irq_source;
3624 
3625 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3626 				dm_vupdate_high_irq, c_irq_params);
3627 	}
3628 
3629 	/* Use GRPH_PFLIP interrupt */
3630 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3631 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3632 			i++) {
3633 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3634 		if (r) {
3635 			DRM_ERROR("Failed to add page flip irq id!\n");
3636 			return r;
3637 		}
3638 
3639 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3640 		int_params.irq_source =
3641 			dc_interrupt_to_irq_source(dc, i, 0);
3642 
3643 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3644 
3645 		c_irq_params->adev = adev;
3646 		c_irq_params->irq_src = int_params.irq_source;
3647 
3648 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3649 				dm_pflip_high_irq, c_irq_params);
3650 
3651 	}
3652 
3653 	/* HPD */
3654 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3655 			&adev->hpd_irq);
3656 	if (r) {
3657 		DRM_ERROR("Failed to add hpd irq id!\n");
3658 		return r;
3659 	}
3660 
3661 	register_hpd_handlers(adev);
3662 
3663 	return 0;
3664 }
3665 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3666 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3667 {
3668 	struct dc *dc = adev->dm.dc;
3669 	struct common_irq_params *c_irq_params;
3670 	struct dc_interrupt_params int_params = {0};
3671 	int r, i;
3672 
3673 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3674 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3675 
3676 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3677 			&adev->dmub_outbox_irq);
3678 	if (r) {
3679 		DRM_ERROR("Failed to add outbox irq id!\n");
3680 		return r;
3681 	}
3682 
3683 	if (dc->ctx->dmub_srv) {
3684 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3685 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3686 		int_params.irq_source =
3687 		dc_interrupt_to_irq_source(dc, i, 0);
3688 
3689 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3690 
3691 		c_irq_params->adev = adev;
3692 		c_irq_params->irq_src = int_params.irq_source;
3693 
3694 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3695 				dm_dmub_outbox1_low_irq, c_irq_params);
3696 	}
3697 
3698 	return 0;
3699 }
3700 #endif
3701 
3702 /*
3703  * Acquires the lock for the atomic state object and returns
3704  * the new atomic state.
3705  *
3706  * This should only be called during atomic check.
3707  */
3708 static int dm_atomic_get_state(struct drm_atomic_state *state,
3709 			       struct dm_atomic_state **dm_state)
3710 {
3711 	struct drm_device *dev = state->dev;
3712 	struct amdgpu_device *adev = drm_to_adev(dev);
3713 	struct amdgpu_display_manager *dm = &adev->dm;
3714 	struct drm_private_state *priv_state;
3715 
3716 	if (*dm_state)
3717 		return 0;
3718 
3719 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3720 	if (IS_ERR(priv_state))
3721 		return PTR_ERR(priv_state);
3722 
3723 	*dm_state = to_dm_atomic_state(priv_state);
3724 
3725 	return 0;
3726 }
3727 
3728 static struct dm_atomic_state *
3729 dm_atomic_get_new_state(struct drm_atomic_state *state)
3730 {
3731 	struct drm_device *dev = state->dev;
3732 	struct amdgpu_device *adev = drm_to_adev(dev);
3733 	struct amdgpu_display_manager *dm = &adev->dm;
3734 	struct drm_private_obj *obj;
3735 	struct drm_private_state *new_obj_state;
3736 	int i;
3737 
3738 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3739 		if (obj->funcs == dm->atomic_obj.funcs)
3740 			return to_dm_atomic_state(new_obj_state);
3741 	}
3742 
3743 	return NULL;
3744 }
3745 
3746 static struct drm_private_state *
3747 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3748 {
3749 	struct dm_atomic_state *old_state, *new_state;
3750 
3751 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3752 	if (!new_state)
3753 		return NULL;
3754 
3755 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3756 
3757 	old_state = to_dm_atomic_state(obj->state);
3758 
3759 	if (old_state && old_state->context)
3760 		new_state->context = dc_copy_state(old_state->context);
3761 
3762 	if (!new_state->context) {
3763 		kfree(new_state);
3764 		return NULL;
3765 	}
3766 
3767 	return &new_state->base;
3768 }
3769 
3770 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3771 				    struct drm_private_state *state)
3772 {
3773 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3774 
3775 	if (dm_state && dm_state->context)
3776 		dc_release_state(dm_state->context);
3777 
3778 	kfree(dm_state);
3779 }
3780 
3781 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3782 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3783 	.atomic_destroy_state = dm_atomic_destroy_state,
3784 };
3785 
3786 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3787 {
3788 	struct dm_atomic_state *state;
3789 	int r;
3790 
3791 	adev->mode_info.mode_config_initialized = true;
3792 
3793 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3794 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3795 
3796 	adev_to_drm(adev)->mode_config.max_width = 16384;
3797 	adev_to_drm(adev)->mode_config.max_height = 16384;
3798 
3799 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3800 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3801 	/* indicates support for immediate flip */
3802 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3803 
3804 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3805 
3806 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3807 	if (!state)
3808 		return -ENOMEM;
3809 
3810 	state->context = dc_create_state(adev->dm.dc);
3811 	if (!state->context) {
3812 		kfree(state);
3813 		return -ENOMEM;
3814 	}
3815 
3816 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3817 
3818 	drm_atomic_private_obj_init(adev_to_drm(adev),
3819 				    &adev->dm.atomic_obj,
3820 				    &state->base,
3821 				    &dm_atomic_state_funcs);
3822 
3823 	r = amdgpu_display_modeset_create_props(adev);
3824 	if (r) {
3825 		dc_release_state(state->context);
3826 		kfree(state);
3827 		return r;
3828 	}
3829 
3830 	r = amdgpu_dm_audio_init(adev);
3831 	if (r) {
3832 		dc_release_state(state->context);
3833 		kfree(state);
3834 		return r;
3835 	}
3836 
3837 	return 0;
3838 }
3839 
3840 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3841 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3842 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3843 
3844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3845 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3846 
3847 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3848 					    int bl_idx)
3849 {
3850 #if defined(CONFIG_ACPI)
3851 	struct amdgpu_dm_backlight_caps caps;
3852 
3853 	memset(&caps, 0, sizeof(caps));
3854 
3855 	if (dm->backlight_caps[bl_idx].caps_valid)
3856 		return;
3857 
3858 	amdgpu_acpi_get_backlight_caps(&caps);
3859 	if (caps.caps_valid) {
3860 		dm->backlight_caps[bl_idx].caps_valid = true;
3861 		if (caps.aux_support)
3862 			return;
3863 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3864 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3865 	} else {
3866 		dm->backlight_caps[bl_idx].min_input_signal =
3867 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3868 		dm->backlight_caps[bl_idx].max_input_signal =
3869 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3870 	}
3871 #else
3872 	if (dm->backlight_caps[bl_idx].aux_support)
3873 		return;
3874 
3875 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3876 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3877 #endif
3878 }
3879 
3880 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3881 				unsigned *min, unsigned *max)
3882 {
3883 	if (!caps)
3884 		return 0;
3885 
3886 	if (caps->aux_support) {
3887 		// Firmware limits are in nits, DC API wants millinits.
3888 		*max = 1000 * caps->aux_max_input_signal;
3889 		*min = 1000 * caps->aux_min_input_signal;
3890 	} else {
3891 		// Firmware limits are 8-bit, PWM control is 16-bit.
3892 		*max = 0x101 * caps->max_input_signal;
3893 		*min = 0x101 * caps->min_input_signal;
3894 	}
3895 	return 1;
3896 }
3897 
3898 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3899 					uint32_t brightness)
3900 {
3901 	unsigned min, max;
3902 
3903 	if (!get_brightness_range(caps, &min, &max))
3904 		return brightness;
3905 
3906 	// Rescale 0..255 to min..max
3907 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3908 				       AMDGPU_MAX_BL_LEVEL);
3909 }
3910 
3911 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3912 				      uint32_t brightness)
3913 {
3914 	unsigned min, max;
3915 
3916 	if (!get_brightness_range(caps, &min, &max))
3917 		return brightness;
3918 
3919 	if (brightness < min)
3920 		return 0;
3921 	// Rescale min..max to 0..255
3922 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3923 				 max - min);
3924 }
3925 
3926 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3927 					 int bl_idx,
3928 					 u32 user_brightness)
3929 {
3930 	struct amdgpu_dm_backlight_caps caps;
3931 	struct dc_link *link;
3932 	u32 brightness;
3933 	bool rc;
3934 
3935 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3936 	caps = dm->backlight_caps[bl_idx];
3937 
3938 	dm->brightness[bl_idx] = user_brightness;
3939 	/* update scratch register */
3940 	if (bl_idx == 0)
3941 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3942 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3943 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3944 
3945 	/* Change brightness based on AUX property */
3946 	if (caps.aux_support) {
3947 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3948 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3949 		if (!rc)
3950 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3951 	} else {
3952 		rc = dc_link_set_backlight_level(link, brightness, 0);
3953 		if (!rc)
3954 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3955 	}
3956 
3957 	return rc ? 0 : 1;
3958 }
3959 
3960 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3961 {
3962 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3963 	int i;
3964 
3965 	for (i = 0; i < dm->num_of_edps; i++) {
3966 		if (bd == dm->backlight_dev[i])
3967 			break;
3968 	}
3969 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3970 		i = 0;
3971 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3972 
3973 	return 0;
3974 }
3975 
3976 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3977 					 int bl_idx)
3978 {
3979 	struct amdgpu_dm_backlight_caps caps;
3980 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3981 
3982 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3983 	caps = dm->backlight_caps[bl_idx];
3984 
3985 	if (caps.aux_support) {
3986 		u32 avg, peak;
3987 		bool rc;
3988 
3989 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3990 		if (!rc)
3991 			return dm->brightness[bl_idx];
3992 		return convert_brightness_to_user(&caps, avg);
3993 	} else {
3994 		int ret = dc_link_get_backlight_level(link);
3995 
3996 		if (ret == DC_ERROR_UNEXPECTED)
3997 			return dm->brightness[bl_idx];
3998 		return convert_brightness_to_user(&caps, ret);
3999 	}
4000 }
4001 
4002 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4003 {
4004 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4005 	int i;
4006 
4007 	for (i = 0; i < dm->num_of_edps; i++) {
4008 		if (bd == dm->backlight_dev[i])
4009 			break;
4010 	}
4011 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4012 		i = 0;
4013 	return amdgpu_dm_backlight_get_level(dm, i);
4014 }
4015 
4016 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4017 	.options = BL_CORE_SUSPENDRESUME,
4018 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4019 	.update_status	= amdgpu_dm_backlight_update_status,
4020 };
4021 
4022 static void
4023 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4024 {
4025 	char bl_name[16];
4026 	struct backlight_properties props = { 0 };
4027 
4028 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4029 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4030 
4031 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4032 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4033 	props.type = BACKLIGHT_RAW;
4034 
4035 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4036 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4037 
4038 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4039 								       adev_to_drm(dm->adev)->dev,
4040 								       dm,
4041 								       &amdgpu_dm_backlight_ops,
4042 								       &props);
4043 
4044 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4045 		DRM_ERROR("DM: Backlight registration failed!\n");
4046 	else
4047 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4048 }
4049 #endif
4050 
4051 static int initialize_plane(struct amdgpu_display_manager *dm,
4052 			    struct amdgpu_mode_info *mode_info, int plane_id,
4053 			    enum drm_plane_type plane_type,
4054 			    const struct dc_plane_cap *plane_cap)
4055 {
4056 	struct drm_plane *plane;
4057 	unsigned long possible_crtcs;
4058 	int ret = 0;
4059 
4060 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4061 	if (!plane) {
4062 		DRM_ERROR("KMS: Failed to allocate plane\n");
4063 		return -ENOMEM;
4064 	}
4065 	plane->type = plane_type;
4066 
4067 	/*
4068 	 * HACK: IGT tests expect that the primary plane for a CRTC
4069 	 * can only have one possible CRTC. Only expose support for
4070 	 * any CRTC if they're not going to be used as a primary plane
4071 	 * for a CRTC - like overlay or underlay planes.
4072 	 */
4073 	possible_crtcs = 1 << plane_id;
4074 	if (plane_id >= dm->dc->caps.max_streams)
4075 		possible_crtcs = 0xff;
4076 
4077 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4078 
4079 	if (ret) {
4080 		DRM_ERROR("KMS: Failed to initialize plane\n");
4081 		kfree(plane);
4082 		return ret;
4083 	}
4084 
4085 	if (mode_info)
4086 		mode_info->planes[plane_id] = plane;
4087 
4088 	return ret;
4089 }
4090 
4091 
4092 static void register_backlight_device(struct amdgpu_display_manager *dm,
4093 				      struct dc_link *link)
4094 {
4095 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4096 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4097 
4098 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4099 	    link->type != dc_connection_none) {
4100 		/*
4101 		 * Event if registration failed, we should continue with
4102 		 * DM initialization because not having a backlight control
4103 		 * is better then a black screen.
4104 		 */
4105 		if (!dm->backlight_dev[dm->num_of_edps])
4106 			amdgpu_dm_register_backlight_device(dm);
4107 
4108 		if (dm->backlight_dev[dm->num_of_edps]) {
4109 			dm->backlight_link[dm->num_of_edps] = link;
4110 			dm->num_of_edps++;
4111 		}
4112 	}
4113 #endif
4114 }
4115 
4116 
4117 /*
4118  * In this architecture, the association
4119  * connector -> encoder -> crtc
4120  * id not really requried. The crtc and connector will hold the
4121  * display_index as an abstraction to use with DAL component
4122  *
4123  * Returns 0 on success
4124  */
4125 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4126 {
4127 	struct amdgpu_display_manager *dm = &adev->dm;
4128 	int32_t i;
4129 	struct amdgpu_dm_connector *aconnector = NULL;
4130 	struct amdgpu_encoder *aencoder = NULL;
4131 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4132 	uint32_t link_cnt;
4133 	int32_t primary_planes;
4134 	enum dc_connection_type new_connection_type = dc_connection_none;
4135 	const struct dc_plane_cap *plane;
4136 	bool psr_feature_enabled = false;
4137 
4138 	dm->display_indexes_num = dm->dc->caps.max_streams;
4139 	/* Update the actual used number of crtc */
4140 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4141 
4142 	link_cnt = dm->dc->caps.max_links;
4143 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4144 		DRM_ERROR("DM: Failed to initialize mode config\n");
4145 		return -EINVAL;
4146 	}
4147 
4148 	/* There is one primary plane per CRTC */
4149 	primary_planes = dm->dc->caps.max_streams;
4150 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4151 
4152 	/*
4153 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4154 	 * Order is reversed to match iteration order in atomic check.
4155 	 */
4156 	for (i = (primary_planes - 1); i >= 0; i--) {
4157 		plane = &dm->dc->caps.planes[i];
4158 
4159 		if (initialize_plane(dm, mode_info, i,
4160 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4161 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4162 			goto fail;
4163 		}
4164 	}
4165 
4166 	/*
4167 	 * Initialize overlay planes, index starting after primary planes.
4168 	 * These planes have a higher DRM index than the primary planes since
4169 	 * they should be considered as having a higher z-order.
4170 	 * Order is reversed to match iteration order in atomic check.
4171 	 *
4172 	 * Only support DCN for now, and only expose one so we don't encourage
4173 	 * userspace to use up all the pipes.
4174 	 */
4175 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4176 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4177 
4178 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4179 			continue;
4180 
4181 		if (!plane->blends_with_above || !plane->blends_with_below)
4182 			continue;
4183 
4184 		if (!plane->pixel_format_support.argb8888)
4185 			continue;
4186 
4187 		if (initialize_plane(dm, NULL, primary_planes + i,
4188 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4189 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4190 			goto fail;
4191 		}
4192 
4193 		/* Only create one overlay plane. */
4194 		break;
4195 	}
4196 
4197 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4198 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4199 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4200 			goto fail;
4201 		}
4202 
4203 #if defined(CONFIG_DRM_AMD_DC_DCN)
4204 	/* Use Outbox interrupt */
4205 	switch (adev->ip_versions[DCE_HWIP][0]) {
4206 	case IP_VERSION(3, 0, 0):
4207 	case IP_VERSION(3, 1, 2):
4208 	case IP_VERSION(3, 1, 3):
4209 	case IP_VERSION(2, 1, 0):
4210 		if (register_outbox_irq_handlers(dm->adev)) {
4211 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4212 			goto fail;
4213 		}
4214 		break;
4215 	default:
4216 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4217 			      adev->ip_versions[DCE_HWIP][0]);
4218 	}
4219 
4220 	/* Determine whether to enable PSR support by default. */
4221 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4222 		switch (adev->ip_versions[DCE_HWIP][0]) {
4223 		case IP_VERSION(3, 1, 2):
4224 		case IP_VERSION(3, 1, 3):
4225 			psr_feature_enabled = true;
4226 			break;
4227 		default:
4228 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4229 			break;
4230 		}
4231 	}
4232 #endif
4233 
4234 	/* loops over all connectors on the board */
4235 	for (i = 0; i < link_cnt; i++) {
4236 		struct dc_link *link = NULL;
4237 
4238 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4239 			DRM_ERROR(
4240 				"KMS: Cannot support more than %d display indexes\n",
4241 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4242 			continue;
4243 		}
4244 
4245 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4246 		if (!aconnector)
4247 			goto fail;
4248 
4249 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4250 		if (!aencoder)
4251 			goto fail;
4252 
4253 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4254 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4255 			goto fail;
4256 		}
4257 
4258 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4259 			DRM_ERROR("KMS: Failed to initialize connector\n");
4260 			goto fail;
4261 		}
4262 
4263 		link = dc_get_link_at_index(dm->dc, i);
4264 
4265 		if (!dc_link_detect_sink(link, &new_connection_type))
4266 			DRM_ERROR("KMS: Failed to detect connector\n");
4267 
4268 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4269 			emulated_link_detect(link);
4270 			amdgpu_dm_update_connector_after_detect(aconnector);
4271 
4272 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4273 			amdgpu_dm_update_connector_after_detect(aconnector);
4274 			register_backlight_device(dm, link);
4275 			if (dm->num_of_edps)
4276 				update_connector_ext_caps(aconnector);
4277 			if (psr_feature_enabled)
4278 				amdgpu_dm_set_psr_caps(link);
4279 		}
4280 
4281 
4282 	}
4283 
4284 	/* Software is initialized. Now we can register interrupt handlers. */
4285 	switch (adev->asic_type) {
4286 #if defined(CONFIG_DRM_AMD_DC_SI)
4287 	case CHIP_TAHITI:
4288 	case CHIP_PITCAIRN:
4289 	case CHIP_VERDE:
4290 	case CHIP_OLAND:
4291 		if (dce60_register_irq_handlers(dm->adev)) {
4292 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4293 			goto fail;
4294 		}
4295 		break;
4296 #endif
4297 	case CHIP_BONAIRE:
4298 	case CHIP_HAWAII:
4299 	case CHIP_KAVERI:
4300 	case CHIP_KABINI:
4301 	case CHIP_MULLINS:
4302 	case CHIP_TONGA:
4303 	case CHIP_FIJI:
4304 	case CHIP_CARRIZO:
4305 	case CHIP_STONEY:
4306 	case CHIP_POLARIS11:
4307 	case CHIP_POLARIS10:
4308 	case CHIP_POLARIS12:
4309 	case CHIP_VEGAM:
4310 	case CHIP_VEGA10:
4311 	case CHIP_VEGA12:
4312 	case CHIP_VEGA20:
4313 		if (dce110_register_irq_handlers(dm->adev)) {
4314 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4315 			goto fail;
4316 		}
4317 		break;
4318 	default:
4319 #if defined(CONFIG_DRM_AMD_DC_DCN)
4320 		switch (adev->ip_versions[DCE_HWIP][0]) {
4321 		case IP_VERSION(1, 0, 0):
4322 		case IP_VERSION(1, 0, 1):
4323 		case IP_VERSION(2, 0, 2):
4324 		case IP_VERSION(2, 0, 3):
4325 		case IP_VERSION(2, 0, 0):
4326 		case IP_VERSION(2, 1, 0):
4327 		case IP_VERSION(3, 0, 0):
4328 		case IP_VERSION(3, 0, 2):
4329 		case IP_VERSION(3, 0, 3):
4330 		case IP_VERSION(3, 0, 1):
4331 		case IP_VERSION(3, 1, 2):
4332 		case IP_VERSION(3, 1, 3):
4333 			if (dcn10_register_irq_handlers(dm->adev)) {
4334 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4335 				goto fail;
4336 			}
4337 			break;
4338 		default:
4339 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4340 					adev->ip_versions[DCE_HWIP][0]);
4341 			goto fail;
4342 		}
4343 #endif
4344 		break;
4345 	}
4346 
4347 	return 0;
4348 fail:
4349 	kfree(aencoder);
4350 	kfree(aconnector);
4351 
4352 	return -EINVAL;
4353 }
4354 
4355 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4356 {
4357 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4358 	return;
4359 }
4360 
4361 /******************************************************************************
4362  * amdgpu_display_funcs functions
4363  *****************************************************************************/
4364 
4365 /*
4366  * dm_bandwidth_update - program display watermarks
4367  *
4368  * @adev: amdgpu_device pointer
4369  *
4370  * Calculate and program the display watermarks and line buffer allocation.
4371  */
4372 static void dm_bandwidth_update(struct amdgpu_device *adev)
4373 {
4374 	/* TODO: implement later */
4375 }
4376 
4377 static const struct amdgpu_display_funcs dm_display_funcs = {
4378 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4379 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4380 	.backlight_set_level = NULL, /* never called for DC */
4381 	.backlight_get_level = NULL, /* never called for DC */
4382 	.hpd_sense = NULL,/* called unconditionally */
4383 	.hpd_set_polarity = NULL, /* called unconditionally */
4384 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4385 	.page_flip_get_scanoutpos =
4386 		dm_crtc_get_scanoutpos,/* called unconditionally */
4387 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4388 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4389 };
4390 
4391 #if defined(CONFIG_DEBUG_KERNEL_DC)
4392 
4393 static ssize_t s3_debug_store(struct device *device,
4394 			      struct device_attribute *attr,
4395 			      const char *buf,
4396 			      size_t count)
4397 {
4398 	int ret;
4399 	int s3_state;
4400 	struct drm_device *drm_dev = dev_get_drvdata(device);
4401 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4402 
4403 	ret = kstrtoint(buf, 0, &s3_state);
4404 
4405 	if (ret == 0) {
4406 		if (s3_state) {
4407 			dm_resume(adev);
4408 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4409 		} else
4410 			dm_suspend(adev);
4411 	}
4412 
4413 	return ret == 0 ? count : 0;
4414 }
4415 
4416 DEVICE_ATTR_WO(s3_debug);
4417 
4418 #endif
4419 
4420 static int dm_early_init(void *handle)
4421 {
4422 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4423 
4424 	switch (adev->asic_type) {
4425 #if defined(CONFIG_DRM_AMD_DC_SI)
4426 	case CHIP_TAHITI:
4427 	case CHIP_PITCAIRN:
4428 	case CHIP_VERDE:
4429 		adev->mode_info.num_crtc = 6;
4430 		adev->mode_info.num_hpd = 6;
4431 		adev->mode_info.num_dig = 6;
4432 		break;
4433 	case CHIP_OLAND:
4434 		adev->mode_info.num_crtc = 2;
4435 		adev->mode_info.num_hpd = 2;
4436 		adev->mode_info.num_dig = 2;
4437 		break;
4438 #endif
4439 	case CHIP_BONAIRE:
4440 	case CHIP_HAWAII:
4441 		adev->mode_info.num_crtc = 6;
4442 		adev->mode_info.num_hpd = 6;
4443 		adev->mode_info.num_dig = 6;
4444 		break;
4445 	case CHIP_KAVERI:
4446 		adev->mode_info.num_crtc = 4;
4447 		adev->mode_info.num_hpd = 6;
4448 		adev->mode_info.num_dig = 7;
4449 		break;
4450 	case CHIP_KABINI:
4451 	case CHIP_MULLINS:
4452 		adev->mode_info.num_crtc = 2;
4453 		adev->mode_info.num_hpd = 6;
4454 		adev->mode_info.num_dig = 6;
4455 		break;
4456 	case CHIP_FIJI:
4457 	case CHIP_TONGA:
4458 		adev->mode_info.num_crtc = 6;
4459 		adev->mode_info.num_hpd = 6;
4460 		adev->mode_info.num_dig = 7;
4461 		break;
4462 	case CHIP_CARRIZO:
4463 		adev->mode_info.num_crtc = 3;
4464 		adev->mode_info.num_hpd = 6;
4465 		adev->mode_info.num_dig = 9;
4466 		break;
4467 	case CHIP_STONEY:
4468 		adev->mode_info.num_crtc = 2;
4469 		adev->mode_info.num_hpd = 6;
4470 		adev->mode_info.num_dig = 9;
4471 		break;
4472 	case CHIP_POLARIS11:
4473 	case CHIP_POLARIS12:
4474 		adev->mode_info.num_crtc = 5;
4475 		adev->mode_info.num_hpd = 5;
4476 		adev->mode_info.num_dig = 5;
4477 		break;
4478 	case CHIP_POLARIS10:
4479 	case CHIP_VEGAM:
4480 		adev->mode_info.num_crtc = 6;
4481 		adev->mode_info.num_hpd = 6;
4482 		adev->mode_info.num_dig = 6;
4483 		break;
4484 	case CHIP_VEGA10:
4485 	case CHIP_VEGA12:
4486 	case CHIP_VEGA20:
4487 		adev->mode_info.num_crtc = 6;
4488 		adev->mode_info.num_hpd = 6;
4489 		adev->mode_info.num_dig = 6;
4490 		break;
4491 	default:
4492 #if defined(CONFIG_DRM_AMD_DC_DCN)
4493 		switch (adev->ip_versions[DCE_HWIP][0]) {
4494 		case IP_VERSION(2, 0, 2):
4495 		case IP_VERSION(3, 0, 0):
4496 			adev->mode_info.num_crtc = 6;
4497 			adev->mode_info.num_hpd = 6;
4498 			adev->mode_info.num_dig = 6;
4499 			break;
4500 		case IP_VERSION(2, 0, 0):
4501 		case IP_VERSION(3, 0, 2):
4502 			adev->mode_info.num_crtc = 5;
4503 			adev->mode_info.num_hpd = 5;
4504 			adev->mode_info.num_dig = 5;
4505 			break;
4506 		case IP_VERSION(2, 0, 3):
4507 		case IP_VERSION(3, 0, 3):
4508 			adev->mode_info.num_crtc = 2;
4509 			adev->mode_info.num_hpd = 2;
4510 			adev->mode_info.num_dig = 2;
4511 			break;
4512 		case IP_VERSION(1, 0, 0):
4513 		case IP_VERSION(1, 0, 1):
4514 		case IP_VERSION(3, 0, 1):
4515 		case IP_VERSION(2, 1, 0):
4516 		case IP_VERSION(3, 1, 2):
4517 		case IP_VERSION(3, 1, 3):
4518 			adev->mode_info.num_crtc = 4;
4519 			adev->mode_info.num_hpd = 4;
4520 			adev->mode_info.num_dig = 4;
4521 			break;
4522 		default:
4523 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4524 					adev->ip_versions[DCE_HWIP][0]);
4525 			return -EINVAL;
4526 		}
4527 #endif
4528 		break;
4529 	}
4530 
4531 	amdgpu_dm_set_irq_funcs(adev);
4532 
4533 	if (adev->mode_info.funcs == NULL)
4534 		adev->mode_info.funcs = &dm_display_funcs;
4535 
4536 	/*
4537 	 * Note: Do NOT change adev->audio_endpt_rreg and
4538 	 * adev->audio_endpt_wreg because they are initialised in
4539 	 * amdgpu_device_init()
4540 	 */
4541 #if defined(CONFIG_DEBUG_KERNEL_DC)
4542 	device_create_file(
4543 		adev_to_drm(adev)->dev,
4544 		&dev_attr_s3_debug);
4545 #endif
4546 
4547 	return 0;
4548 }
4549 
4550 static bool modeset_required(struct drm_crtc_state *crtc_state,
4551 			     struct dc_stream_state *new_stream,
4552 			     struct dc_stream_state *old_stream)
4553 {
4554 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4555 }
4556 
4557 static bool modereset_required(struct drm_crtc_state *crtc_state)
4558 {
4559 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4560 }
4561 
4562 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4563 {
4564 	drm_encoder_cleanup(encoder);
4565 	kfree(encoder);
4566 }
4567 
4568 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4569 	.destroy = amdgpu_dm_encoder_destroy,
4570 };
4571 
4572 
4573 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4574 					 struct drm_framebuffer *fb,
4575 					 int *min_downscale, int *max_upscale)
4576 {
4577 	struct amdgpu_device *adev = drm_to_adev(dev);
4578 	struct dc *dc = adev->dm.dc;
4579 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4580 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4581 
4582 	switch (fb->format->format) {
4583 	case DRM_FORMAT_P010:
4584 	case DRM_FORMAT_NV12:
4585 	case DRM_FORMAT_NV21:
4586 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4587 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4588 		break;
4589 
4590 	case DRM_FORMAT_XRGB16161616F:
4591 	case DRM_FORMAT_ARGB16161616F:
4592 	case DRM_FORMAT_XBGR16161616F:
4593 	case DRM_FORMAT_ABGR16161616F:
4594 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4595 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4596 		break;
4597 
4598 	default:
4599 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4600 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4601 		break;
4602 	}
4603 
4604 	/*
4605 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4606 	 * scaling factor of 1.0 == 1000 units.
4607 	 */
4608 	if (*max_upscale == 1)
4609 		*max_upscale = 1000;
4610 
4611 	if (*min_downscale == 1)
4612 		*min_downscale = 1000;
4613 }
4614 
4615 
4616 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4617 				const struct drm_plane_state *state,
4618 				struct dc_scaling_info *scaling_info)
4619 {
4620 	int scale_w, scale_h, min_downscale, max_upscale;
4621 
4622 	memset(scaling_info, 0, sizeof(*scaling_info));
4623 
4624 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4625 	scaling_info->src_rect.x = state->src_x >> 16;
4626 	scaling_info->src_rect.y = state->src_y >> 16;
4627 
4628 	/*
4629 	 * For reasons we don't (yet) fully understand a non-zero
4630 	 * src_y coordinate into an NV12 buffer can cause a
4631 	 * system hang on DCN1x.
4632 	 * To avoid hangs (and maybe be overly cautious)
4633 	 * let's reject both non-zero src_x and src_y.
4634 	 *
4635 	 * We currently know of only one use-case to reproduce a
4636 	 * scenario with non-zero src_x and src_y for NV12, which
4637 	 * is to gesture the YouTube Android app into full screen
4638 	 * on ChromeOS.
4639 	 */
4640 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4641 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4642 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4643 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4644 		return -EINVAL;
4645 
4646 	scaling_info->src_rect.width = state->src_w >> 16;
4647 	if (scaling_info->src_rect.width == 0)
4648 		return -EINVAL;
4649 
4650 	scaling_info->src_rect.height = state->src_h >> 16;
4651 	if (scaling_info->src_rect.height == 0)
4652 		return -EINVAL;
4653 
4654 	scaling_info->dst_rect.x = state->crtc_x;
4655 	scaling_info->dst_rect.y = state->crtc_y;
4656 
4657 	if (state->crtc_w == 0)
4658 		return -EINVAL;
4659 
4660 	scaling_info->dst_rect.width = state->crtc_w;
4661 
4662 	if (state->crtc_h == 0)
4663 		return -EINVAL;
4664 
4665 	scaling_info->dst_rect.height = state->crtc_h;
4666 
4667 	/* DRM doesn't specify clipping on destination output. */
4668 	scaling_info->clip_rect = scaling_info->dst_rect;
4669 
4670 	/* Validate scaling per-format with DC plane caps */
4671 	if (state->plane && state->plane->dev && state->fb) {
4672 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4673 					     &min_downscale, &max_upscale);
4674 	} else {
4675 		min_downscale = 250;
4676 		max_upscale = 16000;
4677 	}
4678 
4679 	scale_w = scaling_info->dst_rect.width * 1000 /
4680 		  scaling_info->src_rect.width;
4681 
4682 	if (scale_w < min_downscale || scale_w > max_upscale)
4683 		return -EINVAL;
4684 
4685 	scale_h = scaling_info->dst_rect.height * 1000 /
4686 		  scaling_info->src_rect.height;
4687 
4688 	if (scale_h < min_downscale || scale_h > max_upscale)
4689 		return -EINVAL;
4690 
4691 	/*
4692 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4693 	 * assume reasonable defaults based on the format.
4694 	 */
4695 
4696 	return 0;
4697 }
4698 
4699 static void
4700 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4701 				 uint64_t tiling_flags)
4702 {
4703 	/* Fill GFX8 params */
4704 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4705 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4706 
4707 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4708 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4709 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4710 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4711 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4712 
4713 		/* XXX fix me for VI */
4714 		tiling_info->gfx8.num_banks = num_banks;
4715 		tiling_info->gfx8.array_mode =
4716 				DC_ARRAY_2D_TILED_THIN1;
4717 		tiling_info->gfx8.tile_split = tile_split;
4718 		tiling_info->gfx8.bank_width = bankw;
4719 		tiling_info->gfx8.bank_height = bankh;
4720 		tiling_info->gfx8.tile_aspect = mtaspect;
4721 		tiling_info->gfx8.tile_mode =
4722 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4723 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4724 			== DC_ARRAY_1D_TILED_THIN1) {
4725 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4726 	}
4727 
4728 	tiling_info->gfx8.pipe_config =
4729 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4730 }
4731 
4732 static void
4733 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4734 				  union dc_tiling_info *tiling_info)
4735 {
4736 	tiling_info->gfx9.num_pipes =
4737 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4738 	tiling_info->gfx9.num_banks =
4739 		adev->gfx.config.gb_addr_config_fields.num_banks;
4740 	tiling_info->gfx9.pipe_interleave =
4741 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4742 	tiling_info->gfx9.num_shader_engines =
4743 		adev->gfx.config.gb_addr_config_fields.num_se;
4744 	tiling_info->gfx9.max_compressed_frags =
4745 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4746 	tiling_info->gfx9.num_rb_per_se =
4747 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4748 	tiling_info->gfx9.shaderEnable = 1;
4749 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4750 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4751 }
4752 
4753 static int
4754 validate_dcc(struct amdgpu_device *adev,
4755 	     const enum surface_pixel_format format,
4756 	     const enum dc_rotation_angle rotation,
4757 	     const union dc_tiling_info *tiling_info,
4758 	     const struct dc_plane_dcc_param *dcc,
4759 	     const struct dc_plane_address *address,
4760 	     const struct plane_size *plane_size)
4761 {
4762 	struct dc *dc = adev->dm.dc;
4763 	struct dc_dcc_surface_param input;
4764 	struct dc_surface_dcc_cap output;
4765 
4766 	memset(&input, 0, sizeof(input));
4767 	memset(&output, 0, sizeof(output));
4768 
4769 	if (!dcc->enable)
4770 		return 0;
4771 
4772 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4773 	    !dc->cap_funcs.get_dcc_compression_cap)
4774 		return -EINVAL;
4775 
4776 	input.format = format;
4777 	input.surface_size.width = plane_size->surface_size.width;
4778 	input.surface_size.height = plane_size->surface_size.height;
4779 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4780 
4781 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4782 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4783 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4784 		input.scan = SCAN_DIRECTION_VERTICAL;
4785 
4786 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4787 		return -EINVAL;
4788 
4789 	if (!output.capable)
4790 		return -EINVAL;
4791 
4792 	if (dcc->independent_64b_blks == 0 &&
4793 	    output.grph.rgb.independent_64b_blks != 0)
4794 		return -EINVAL;
4795 
4796 	return 0;
4797 }
4798 
4799 static bool
4800 modifier_has_dcc(uint64_t modifier)
4801 {
4802 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4803 }
4804 
4805 static unsigned
4806 modifier_gfx9_swizzle_mode(uint64_t modifier)
4807 {
4808 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4809 		return 0;
4810 
4811 	return AMD_FMT_MOD_GET(TILE, modifier);
4812 }
4813 
4814 static const struct drm_format_info *
4815 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4816 {
4817 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4818 }
4819 
4820 static void
4821 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4822 				    union dc_tiling_info *tiling_info,
4823 				    uint64_t modifier)
4824 {
4825 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4826 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4827 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4828 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4829 
4830 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4831 
4832 	if (!IS_AMD_FMT_MOD(modifier))
4833 		return;
4834 
4835 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4836 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4837 
4838 	if (adev->family >= AMDGPU_FAMILY_NV) {
4839 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4840 	} else {
4841 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4842 
4843 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4844 	}
4845 }
4846 
4847 enum dm_micro_swizzle {
4848 	MICRO_SWIZZLE_Z = 0,
4849 	MICRO_SWIZZLE_S = 1,
4850 	MICRO_SWIZZLE_D = 2,
4851 	MICRO_SWIZZLE_R = 3
4852 };
4853 
4854 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4855 					  uint32_t format,
4856 					  uint64_t modifier)
4857 {
4858 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4859 	const struct drm_format_info *info = drm_format_info(format);
4860 	int i;
4861 
4862 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4863 
4864 	if (!info)
4865 		return false;
4866 
4867 	/*
4868 	 * We always have to allow these modifiers:
4869 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4870 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4871 	 */
4872 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4873 	    modifier == DRM_FORMAT_MOD_INVALID) {
4874 		return true;
4875 	}
4876 
4877 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4878 	for (i = 0; i < plane->modifier_count; i++) {
4879 		if (modifier == plane->modifiers[i])
4880 			break;
4881 	}
4882 	if (i == plane->modifier_count)
4883 		return false;
4884 
4885 	/*
4886 	 * For D swizzle the canonical modifier depends on the bpp, so check
4887 	 * it here.
4888 	 */
4889 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4890 	    adev->family >= AMDGPU_FAMILY_NV) {
4891 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4892 			return false;
4893 	}
4894 
4895 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4896 	    info->cpp[0] < 8)
4897 		return false;
4898 
4899 	if (modifier_has_dcc(modifier)) {
4900 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4901 		if (info->cpp[0] != 4)
4902 			return false;
4903 		/* We support multi-planar formats, but not when combined with
4904 		 * additional DCC metadata planes. */
4905 		if (info->num_planes > 1)
4906 			return false;
4907 	}
4908 
4909 	return true;
4910 }
4911 
4912 static void
4913 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4914 {
4915 	if (!*mods)
4916 		return;
4917 
4918 	if (*cap - *size < 1) {
4919 		uint64_t new_cap = *cap * 2;
4920 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4921 
4922 		if (!new_mods) {
4923 			kfree(*mods);
4924 			*mods = NULL;
4925 			return;
4926 		}
4927 
4928 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4929 		kfree(*mods);
4930 		*mods = new_mods;
4931 		*cap = new_cap;
4932 	}
4933 
4934 	(*mods)[*size] = mod;
4935 	*size += 1;
4936 }
4937 
4938 static void
4939 add_gfx9_modifiers(const struct amdgpu_device *adev,
4940 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4941 {
4942 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4943 	int pipe_xor_bits = min(8, pipes +
4944 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4945 	int bank_xor_bits = min(8 - pipe_xor_bits,
4946 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4947 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4948 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4949 
4950 
4951 	if (adev->family == AMDGPU_FAMILY_RV) {
4952 		/* Raven2 and later */
4953 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4954 
4955 		/*
4956 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4957 		 * doesn't support _D on DCN
4958 		 */
4959 
4960 		if (has_constant_encode) {
4961 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4962 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4963 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4964 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4965 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4966 				    AMD_FMT_MOD_SET(DCC, 1) |
4967 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4968 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4969 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4970 		}
4971 
4972 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4973 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4974 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4975 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4976 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4977 			    AMD_FMT_MOD_SET(DCC, 1) |
4978 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4979 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4980 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4981 
4982 		if (has_constant_encode) {
4983 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4984 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4985 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4986 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4987 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4988 				    AMD_FMT_MOD_SET(DCC, 1) |
4989 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4990 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4991 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4992 
4993 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4994 				    AMD_FMT_MOD_SET(RB, rb) |
4995 				    AMD_FMT_MOD_SET(PIPE, pipes));
4996 		}
4997 
4998 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4999 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5000 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5001 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5002 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5003 			    AMD_FMT_MOD_SET(DCC, 1) |
5004 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5005 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5006 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5007 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5008 			    AMD_FMT_MOD_SET(RB, rb) |
5009 			    AMD_FMT_MOD_SET(PIPE, pipes));
5010 	}
5011 
5012 	/*
5013 	 * Only supported for 64bpp on Raven, will be filtered on format in
5014 	 * dm_plane_format_mod_supported.
5015 	 */
5016 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5017 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5018 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5019 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5020 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5021 
5022 	if (adev->family == AMDGPU_FAMILY_RV) {
5023 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5024 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5025 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5026 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5027 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5028 	}
5029 
5030 	/*
5031 	 * Only supported for 64bpp on Raven, will be filtered on format in
5032 	 * dm_plane_format_mod_supported.
5033 	 */
5034 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5035 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5036 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5037 
5038 	if (adev->family == AMDGPU_FAMILY_RV) {
5039 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5040 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5041 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5042 	}
5043 }
5044 
5045 static void
5046 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5047 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5048 {
5049 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5050 
5051 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5052 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5053 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5054 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5055 		    AMD_FMT_MOD_SET(DCC, 1) |
5056 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5057 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5058 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5059 
5060 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5061 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5062 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5063 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5064 		    AMD_FMT_MOD_SET(DCC, 1) |
5065 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5066 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5067 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5068 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5069 
5070 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5071 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5072 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5073 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5074 
5075 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5076 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5077 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5078 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5079 
5080 
5081 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5082 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5083 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5084 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5085 
5086 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5087 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5088 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5089 }
5090 
5091 static void
5092 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5093 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5094 {
5095 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5096 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5097 
5098 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5099 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5100 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5101 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5102 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5103 		    AMD_FMT_MOD_SET(DCC, 1) |
5104 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5105 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5106 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5107 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5108 
5109 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5110 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5111 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5112 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5113 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5114 		    AMD_FMT_MOD_SET(DCC, 1) |
5115 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5116 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5117 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5118 
5119 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5120 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5121 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5122 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5123 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5124 		    AMD_FMT_MOD_SET(DCC, 1) |
5125 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5126 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5127 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5128 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5129 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5130 
5131 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5132 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5133 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5134 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5135 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5136 		    AMD_FMT_MOD_SET(DCC, 1) |
5137 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5138 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5139 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5140 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5141 
5142 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5143 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5144 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5145 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5146 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5147 
5148 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5149 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5150 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5151 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5152 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5153 
5154 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5155 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5156 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5157 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5158 
5159 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5160 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5161 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5162 }
5163 
5164 static int
5165 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5166 {
5167 	uint64_t size = 0, capacity = 128;
5168 	*mods = NULL;
5169 
5170 	/* We have not hooked up any pre-GFX9 modifiers. */
5171 	if (adev->family < AMDGPU_FAMILY_AI)
5172 		return 0;
5173 
5174 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5175 
5176 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5177 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5178 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5179 		return *mods ? 0 : -ENOMEM;
5180 	}
5181 
5182 	switch (adev->family) {
5183 	case AMDGPU_FAMILY_AI:
5184 	case AMDGPU_FAMILY_RV:
5185 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5186 		break;
5187 	case AMDGPU_FAMILY_NV:
5188 	case AMDGPU_FAMILY_VGH:
5189 	case AMDGPU_FAMILY_YC:
5190 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5191 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5192 		else
5193 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5194 		break;
5195 	}
5196 
5197 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5198 
5199 	/* INVALID marks the end of the list. */
5200 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5201 
5202 	if (!*mods)
5203 		return -ENOMEM;
5204 
5205 	return 0;
5206 }
5207 
5208 static int
5209 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5210 					  const struct amdgpu_framebuffer *afb,
5211 					  const enum surface_pixel_format format,
5212 					  const enum dc_rotation_angle rotation,
5213 					  const struct plane_size *plane_size,
5214 					  union dc_tiling_info *tiling_info,
5215 					  struct dc_plane_dcc_param *dcc,
5216 					  struct dc_plane_address *address,
5217 					  const bool force_disable_dcc)
5218 {
5219 	const uint64_t modifier = afb->base.modifier;
5220 	int ret = 0;
5221 
5222 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5223 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5224 
5225 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5226 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5227 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5228 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5229 
5230 		dcc->enable = 1;
5231 		dcc->meta_pitch = afb->base.pitches[1];
5232 		dcc->independent_64b_blks = independent_64b_blks;
5233 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5234 			if (independent_64b_blks && independent_128b_blks)
5235 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5236 			else if (independent_128b_blks)
5237 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5238 			else if (independent_64b_blks && !independent_128b_blks)
5239 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5240 			else
5241 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5242 		} else {
5243 			if (independent_64b_blks)
5244 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5245 			else
5246 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5247 		}
5248 
5249 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5250 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5251 	}
5252 
5253 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5254 	if (ret)
5255 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5256 
5257 	return ret;
5258 }
5259 
5260 static int
5261 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5262 			     const struct amdgpu_framebuffer *afb,
5263 			     const enum surface_pixel_format format,
5264 			     const enum dc_rotation_angle rotation,
5265 			     const uint64_t tiling_flags,
5266 			     union dc_tiling_info *tiling_info,
5267 			     struct plane_size *plane_size,
5268 			     struct dc_plane_dcc_param *dcc,
5269 			     struct dc_plane_address *address,
5270 			     bool tmz_surface,
5271 			     bool force_disable_dcc)
5272 {
5273 	const struct drm_framebuffer *fb = &afb->base;
5274 	int ret;
5275 
5276 	memset(tiling_info, 0, sizeof(*tiling_info));
5277 	memset(plane_size, 0, sizeof(*plane_size));
5278 	memset(dcc, 0, sizeof(*dcc));
5279 	memset(address, 0, sizeof(*address));
5280 
5281 	address->tmz_surface = tmz_surface;
5282 
5283 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5284 		uint64_t addr = afb->address + fb->offsets[0];
5285 
5286 		plane_size->surface_size.x = 0;
5287 		plane_size->surface_size.y = 0;
5288 		plane_size->surface_size.width = fb->width;
5289 		plane_size->surface_size.height = fb->height;
5290 		plane_size->surface_pitch =
5291 			fb->pitches[0] / fb->format->cpp[0];
5292 
5293 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5294 		address->grph.addr.low_part = lower_32_bits(addr);
5295 		address->grph.addr.high_part = upper_32_bits(addr);
5296 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5297 		uint64_t luma_addr = afb->address + fb->offsets[0];
5298 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5299 
5300 		plane_size->surface_size.x = 0;
5301 		plane_size->surface_size.y = 0;
5302 		plane_size->surface_size.width = fb->width;
5303 		plane_size->surface_size.height = fb->height;
5304 		plane_size->surface_pitch =
5305 			fb->pitches[0] / fb->format->cpp[0];
5306 
5307 		plane_size->chroma_size.x = 0;
5308 		plane_size->chroma_size.y = 0;
5309 		/* TODO: set these based on surface format */
5310 		plane_size->chroma_size.width = fb->width / 2;
5311 		plane_size->chroma_size.height = fb->height / 2;
5312 
5313 		plane_size->chroma_pitch =
5314 			fb->pitches[1] / fb->format->cpp[1];
5315 
5316 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5317 		address->video_progressive.luma_addr.low_part =
5318 			lower_32_bits(luma_addr);
5319 		address->video_progressive.luma_addr.high_part =
5320 			upper_32_bits(luma_addr);
5321 		address->video_progressive.chroma_addr.low_part =
5322 			lower_32_bits(chroma_addr);
5323 		address->video_progressive.chroma_addr.high_part =
5324 			upper_32_bits(chroma_addr);
5325 	}
5326 
5327 	if (adev->family >= AMDGPU_FAMILY_AI) {
5328 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5329 								rotation, plane_size,
5330 								tiling_info, dcc,
5331 								address,
5332 								force_disable_dcc);
5333 		if (ret)
5334 			return ret;
5335 	} else {
5336 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5337 	}
5338 
5339 	return 0;
5340 }
5341 
5342 static void
5343 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5344 			       bool *per_pixel_alpha, bool *global_alpha,
5345 			       int *global_alpha_value)
5346 {
5347 	*per_pixel_alpha = false;
5348 	*global_alpha = false;
5349 	*global_alpha_value = 0xff;
5350 
5351 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5352 		return;
5353 
5354 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5355 		static const uint32_t alpha_formats[] = {
5356 			DRM_FORMAT_ARGB8888,
5357 			DRM_FORMAT_RGBA8888,
5358 			DRM_FORMAT_ABGR8888,
5359 		};
5360 		uint32_t format = plane_state->fb->format->format;
5361 		unsigned int i;
5362 
5363 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5364 			if (format == alpha_formats[i]) {
5365 				*per_pixel_alpha = true;
5366 				break;
5367 			}
5368 		}
5369 	}
5370 
5371 	if (plane_state->alpha < 0xffff) {
5372 		*global_alpha = true;
5373 		*global_alpha_value = plane_state->alpha >> 8;
5374 	}
5375 }
5376 
5377 static int
5378 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5379 			    const enum surface_pixel_format format,
5380 			    enum dc_color_space *color_space)
5381 {
5382 	bool full_range;
5383 
5384 	*color_space = COLOR_SPACE_SRGB;
5385 
5386 	/* DRM color properties only affect non-RGB formats. */
5387 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5388 		return 0;
5389 
5390 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5391 
5392 	switch (plane_state->color_encoding) {
5393 	case DRM_COLOR_YCBCR_BT601:
5394 		if (full_range)
5395 			*color_space = COLOR_SPACE_YCBCR601;
5396 		else
5397 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5398 		break;
5399 
5400 	case DRM_COLOR_YCBCR_BT709:
5401 		if (full_range)
5402 			*color_space = COLOR_SPACE_YCBCR709;
5403 		else
5404 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5405 		break;
5406 
5407 	case DRM_COLOR_YCBCR_BT2020:
5408 		if (full_range)
5409 			*color_space = COLOR_SPACE_2020_YCBCR;
5410 		else
5411 			return -EINVAL;
5412 		break;
5413 
5414 	default:
5415 		return -EINVAL;
5416 	}
5417 
5418 	return 0;
5419 }
5420 
5421 static int
5422 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5423 			    const struct drm_plane_state *plane_state,
5424 			    const uint64_t tiling_flags,
5425 			    struct dc_plane_info *plane_info,
5426 			    struct dc_plane_address *address,
5427 			    bool tmz_surface,
5428 			    bool force_disable_dcc)
5429 {
5430 	const struct drm_framebuffer *fb = plane_state->fb;
5431 	const struct amdgpu_framebuffer *afb =
5432 		to_amdgpu_framebuffer(plane_state->fb);
5433 	int ret;
5434 
5435 	memset(plane_info, 0, sizeof(*plane_info));
5436 
5437 	switch (fb->format->format) {
5438 	case DRM_FORMAT_C8:
5439 		plane_info->format =
5440 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5441 		break;
5442 	case DRM_FORMAT_RGB565:
5443 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5444 		break;
5445 	case DRM_FORMAT_XRGB8888:
5446 	case DRM_FORMAT_ARGB8888:
5447 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5448 		break;
5449 	case DRM_FORMAT_XRGB2101010:
5450 	case DRM_FORMAT_ARGB2101010:
5451 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5452 		break;
5453 	case DRM_FORMAT_XBGR2101010:
5454 	case DRM_FORMAT_ABGR2101010:
5455 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5456 		break;
5457 	case DRM_FORMAT_XBGR8888:
5458 	case DRM_FORMAT_ABGR8888:
5459 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5460 		break;
5461 	case DRM_FORMAT_NV21:
5462 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5463 		break;
5464 	case DRM_FORMAT_NV12:
5465 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5466 		break;
5467 	case DRM_FORMAT_P010:
5468 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5469 		break;
5470 	case DRM_FORMAT_XRGB16161616F:
5471 	case DRM_FORMAT_ARGB16161616F:
5472 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5473 		break;
5474 	case DRM_FORMAT_XBGR16161616F:
5475 	case DRM_FORMAT_ABGR16161616F:
5476 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5477 		break;
5478 	case DRM_FORMAT_XRGB16161616:
5479 	case DRM_FORMAT_ARGB16161616:
5480 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5481 		break;
5482 	case DRM_FORMAT_XBGR16161616:
5483 	case DRM_FORMAT_ABGR16161616:
5484 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5485 		break;
5486 	default:
5487 		DRM_ERROR(
5488 			"Unsupported screen format %p4cc\n",
5489 			&fb->format->format);
5490 		return -EINVAL;
5491 	}
5492 
5493 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5494 	case DRM_MODE_ROTATE_0:
5495 		plane_info->rotation = ROTATION_ANGLE_0;
5496 		break;
5497 	case DRM_MODE_ROTATE_90:
5498 		plane_info->rotation = ROTATION_ANGLE_90;
5499 		break;
5500 	case DRM_MODE_ROTATE_180:
5501 		plane_info->rotation = ROTATION_ANGLE_180;
5502 		break;
5503 	case DRM_MODE_ROTATE_270:
5504 		plane_info->rotation = ROTATION_ANGLE_270;
5505 		break;
5506 	default:
5507 		plane_info->rotation = ROTATION_ANGLE_0;
5508 		break;
5509 	}
5510 
5511 	plane_info->visible = true;
5512 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5513 
5514 	plane_info->layer_index = 0;
5515 
5516 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5517 					  &plane_info->color_space);
5518 	if (ret)
5519 		return ret;
5520 
5521 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5522 					   plane_info->rotation, tiling_flags,
5523 					   &plane_info->tiling_info,
5524 					   &plane_info->plane_size,
5525 					   &plane_info->dcc, address, tmz_surface,
5526 					   force_disable_dcc);
5527 	if (ret)
5528 		return ret;
5529 
5530 	fill_blending_from_plane_state(
5531 		plane_state, &plane_info->per_pixel_alpha,
5532 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5533 
5534 	return 0;
5535 }
5536 
5537 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5538 				    struct dc_plane_state *dc_plane_state,
5539 				    struct drm_plane_state *plane_state,
5540 				    struct drm_crtc_state *crtc_state)
5541 {
5542 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5543 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5544 	struct dc_scaling_info scaling_info;
5545 	struct dc_plane_info plane_info;
5546 	int ret;
5547 	bool force_disable_dcc = false;
5548 
5549 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5550 	if (ret)
5551 		return ret;
5552 
5553 	dc_plane_state->src_rect = scaling_info.src_rect;
5554 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5555 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5556 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5557 
5558 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5559 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5560 					  afb->tiling_flags,
5561 					  &plane_info,
5562 					  &dc_plane_state->address,
5563 					  afb->tmz_surface,
5564 					  force_disable_dcc);
5565 	if (ret)
5566 		return ret;
5567 
5568 	dc_plane_state->format = plane_info.format;
5569 	dc_plane_state->color_space = plane_info.color_space;
5570 	dc_plane_state->format = plane_info.format;
5571 	dc_plane_state->plane_size = plane_info.plane_size;
5572 	dc_plane_state->rotation = plane_info.rotation;
5573 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5574 	dc_plane_state->stereo_format = plane_info.stereo_format;
5575 	dc_plane_state->tiling_info = plane_info.tiling_info;
5576 	dc_plane_state->visible = plane_info.visible;
5577 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5578 	dc_plane_state->global_alpha = plane_info.global_alpha;
5579 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5580 	dc_plane_state->dcc = plane_info.dcc;
5581 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5582 	dc_plane_state->flip_int_enabled = true;
5583 
5584 	/*
5585 	 * Always set input transfer function, since plane state is refreshed
5586 	 * every time.
5587 	 */
5588 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5589 	if (ret)
5590 		return ret;
5591 
5592 	return 0;
5593 }
5594 
5595 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5596 					   const struct dm_connector_state *dm_state,
5597 					   struct dc_stream_state *stream)
5598 {
5599 	enum amdgpu_rmx_type rmx_type;
5600 
5601 	struct rect src = { 0 }; /* viewport in composition space*/
5602 	struct rect dst = { 0 }; /* stream addressable area */
5603 
5604 	/* no mode. nothing to be done */
5605 	if (!mode)
5606 		return;
5607 
5608 	/* Full screen scaling by default */
5609 	src.width = mode->hdisplay;
5610 	src.height = mode->vdisplay;
5611 	dst.width = stream->timing.h_addressable;
5612 	dst.height = stream->timing.v_addressable;
5613 
5614 	if (dm_state) {
5615 		rmx_type = dm_state->scaling;
5616 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5617 			if (src.width * dst.height <
5618 					src.height * dst.width) {
5619 				/* height needs less upscaling/more downscaling */
5620 				dst.width = src.width *
5621 						dst.height / src.height;
5622 			} else {
5623 				/* width needs less upscaling/more downscaling */
5624 				dst.height = src.height *
5625 						dst.width / src.width;
5626 			}
5627 		} else if (rmx_type == RMX_CENTER) {
5628 			dst = src;
5629 		}
5630 
5631 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5632 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5633 
5634 		if (dm_state->underscan_enable) {
5635 			dst.x += dm_state->underscan_hborder / 2;
5636 			dst.y += dm_state->underscan_vborder / 2;
5637 			dst.width -= dm_state->underscan_hborder;
5638 			dst.height -= dm_state->underscan_vborder;
5639 		}
5640 	}
5641 
5642 	stream->src = src;
5643 	stream->dst = dst;
5644 
5645 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5646 		      dst.x, dst.y, dst.width, dst.height);
5647 
5648 }
5649 
5650 static enum dc_color_depth
5651 convert_color_depth_from_display_info(const struct drm_connector *connector,
5652 				      bool is_y420, int requested_bpc)
5653 {
5654 	uint8_t bpc;
5655 
5656 	if (is_y420) {
5657 		bpc = 8;
5658 
5659 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5660 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5661 			bpc = 16;
5662 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5663 			bpc = 12;
5664 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5665 			bpc = 10;
5666 	} else {
5667 		bpc = (uint8_t)connector->display_info.bpc;
5668 		/* Assume 8 bpc by default if no bpc is specified. */
5669 		bpc = bpc ? bpc : 8;
5670 	}
5671 
5672 	if (requested_bpc > 0) {
5673 		/*
5674 		 * Cap display bpc based on the user requested value.
5675 		 *
5676 		 * The value for state->max_bpc may not correctly updated
5677 		 * depending on when the connector gets added to the state
5678 		 * or if this was called outside of atomic check, so it
5679 		 * can't be used directly.
5680 		 */
5681 		bpc = min_t(u8, bpc, requested_bpc);
5682 
5683 		/* Round down to the nearest even number. */
5684 		bpc = bpc - (bpc & 1);
5685 	}
5686 
5687 	switch (bpc) {
5688 	case 0:
5689 		/*
5690 		 * Temporary Work around, DRM doesn't parse color depth for
5691 		 * EDID revision before 1.4
5692 		 * TODO: Fix edid parsing
5693 		 */
5694 		return COLOR_DEPTH_888;
5695 	case 6:
5696 		return COLOR_DEPTH_666;
5697 	case 8:
5698 		return COLOR_DEPTH_888;
5699 	case 10:
5700 		return COLOR_DEPTH_101010;
5701 	case 12:
5702 		return COLOR_DEPTH_121212;
5703 	case 14:
5704 		return COLOR_DEPTH_141414;
5705 	case 16:
5706 		return COLOR_DEPTH_161616;
5707 	default:
5708 		return COLOR_DEPTH_UNDEFINED;
5709 	}
5710 }
5711 
5712 static enum dc_aspect_ratio
5713 get_aspect_ratio(const struct drm_display_mode *mode_in)
5714 {
5715 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5716 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5717 }
5718 
5719 static enum dc_color_space
5720 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5721 {
5722 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5723 
5724 	switch (dc_crtc_timing->pixel_encoding)	{
5725 	case PIXEL_ENCODING_YCBCR422:
5726 	case PIXEL_ENCODING_YCBCR444:
5727 	case PIXEL_ENCODING_YCBCR420:
5728 	{
5729 		/*
5730 		 * 27030khz is the separation point between HDTV and SDTV
5731 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5732 		 * respectively
5733 		 */
5734 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5735 			if (dc_crtc_timing->flags.Y_ONLY)
5736 				color_space =
5737 					COLOR_SPACE_YCBCR709_LIMITED;
5738 			else
5739 				color_space = COLOR_SPACE_YCBCR709;
5740 		} else {
5741 			if (dc_crtc_timing->flags.Y_ONLY)
5742 				color_space =
5743 					COLOR_SPACE_YCBCR601_LIMITED;
5744 			else
5745 				color_space = COLOR_SPACE_YCBCR601;
5746 		}
5747 
5748 	}
5749 	break;
5750 	case PIXEL_ENCODING_RGB:
5751 		color_space = COLOR_SPACE_SRGB;
5752 		break;
5753 
5754 	default:
5755 		WARN_ON(1);
5756 		break;
5757 	}
5758 
5759 	return color_space;
5760 }
5761 
5762 static bool adjust_colour_depth_from_display_info(
5763 	struct dc_crtc_timing *timing_out,
5764 	const struct drm_display_info *info)
5765 {
5766 	enum dc_color_depth depth = timing_out->display_color_depth;
5767 	int normalized_clk;
5768 	do {
5769 		normalized_clk = timing_out->pix_clk_100hz / 10;
5770 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5771 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5772 			normalized_clk /= 2;
5773 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5774 		switch (depth) {
5775 		case COLOR_DEPTH_888:
5776 			break;
5777 		case COLOR_DEPTH_101010:
5778 			normalized_clk = (normalized_clk * 30) / 24;
5779 			break;
5780 		case COLOR_DEPTH_121212:
5781 			normalized_clk = (normalized_clk * 36) / 24;
5782 			break;
5783 		case COLOR_DEPTH_161616:
5784 			normalized_clk = (normalized_clk * 48) / 24;
5785 			break;
5786 		default:
5787 			/* The above depths are the only ones valid for HDMI. */
5788 			return false;
5789 		}
5790 		if (normalized_clk <= info->max_tmds_clock) {
5791 			timing_out->display_color_depth = depth;
5792 			return true;
5793 		}
5794 	} while (--depth > COLOR_DEPTH_666);
5795 	return false;
5796 }
5797 
5798 static void fill_stream_properties_from_drm_display_mode(
5799 	struct dc_stream_state *stream,
5800 	const struct drm_display_mode *mode_in,
5801 	const struct drm_connector *connector,
5802 	const struct drm_connector_state *connector_state,
5803 	const struct dc_stream_state *old_stream,
5804 	int requested_bpc)
5805 {
5806 	struct dc_crtc_timing *timing_out = &stream->timing;
5807 	const struct drm_display_info *info = &connector->display_info;
5808 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5809 	struct hdmi_vendor_infoframe hv_frame;
5810 	struct hdmi_avi_infoframe avi_frame;
5811 
5812 	memset(&hv_frame, 0, sizeof(hv_frame));
5813 	memset(&avi_frame, 0, sizeof(avi_frame));
5814 
5815 	timing_out->h_border_left = 0;
5816 	timing_out->h_border_right = 0;
5817 	timing_out->v_border_top = 0;
5818 	timing_out->v_border_bottom = 0;
5819 	/* TODO: un-hardcode */
5820 	if (drm_mode_is_420_only(info, mode_in)
5821 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5822 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5823 	else if (drm_mode_is_420_also(info, mode_in)
5824 			&& aconnector->force_yuv420_output)
5825 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5826 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5827 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5828 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5829 	else
5830 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5831 
5832 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5833 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5834 		connector,
5835 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5836 		requested_bpc);
5837 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5838 	timing_out->hdmi_vic = 0;
5839 
5840 	if(old_stream) {
5841 		timing_out->vic = old_stream->timing.vic;
5842 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5843 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5844 	} else {
5845 		timing_out->vic = drm_match_cea_mode(mode_in);
5846 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5847 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5848 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5849 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5850 	}
5851 
5852 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5853 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5854 		timing_out->vic = avi_frame.video_code;
5855 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5856 		timing_out->hdmi_vic = hv_frame.vic;
5857 	}
5858 
5859 	if (is_freesync_video_mode(mode_in, aconnector)) {
5860 		timing_out->h_addressable = mode_in->hdisplay;
5861 		timing_out->h_total = mode_in->htotal;
5862 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5863 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5864 		timing_out->v_total = mode_in->vtotal;
5865 		timing_out->v_addressable = mode_in->vdisplay;
5866 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5867 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5868 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5869 	} else {
5870 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5871 		timing_out->h_total = mode_in->crtc_htotal;
5872 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5873 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5874 		timing_out->v_total = mode_in->crtc_vtotal;
5875 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5876 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5877 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5878 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5879 	}
5880 
5881 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5882 
5883 	stream->output_color_space = get_output_color_space(timing_out);
5884 
5885 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5886 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5887 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5888 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5889 		    drm_mode_is_420_also(info, mode_in) &&
5890 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5891 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5892 			adjust_colour_depth_from_display_info(timing_out, info);
5893 		}
5894 	}
5895 }
5896 
5897 static void fill_audio_info(struct audio_info *audio_info,
5898 			    const struct drm_connector *drm_connector,
5899 			    const struct dc_sink *dc_sink)
5900 {
5901 	int i = 0;
5902 	int cea_revision = 0;
5903 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5904 
5905 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5906 	audio_info->product_id = edid_caps->product_id;
5907 
5908 	cea_revision = drm_connector->display_info.cea_rev;
5909 
5910 	strscpy(audio_info->display_name,
5911 		edid_caps->display_name,
5912 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5913 
5914 	if (cea_revision >= 3) {
5915 		audio_info->mode_count = edid_caps->audio_mode_count;
5916 
5917 		for (i = 0; i < audio_info->mode_count; ++i) {
5918 			audio_info->modes[i].format_code =
5919 					(enum audio_format_code)
5920 					(edid_caps->audio_modes[i].format_code);
5921 			audio_info->modes[i].channel_count =
5922 					edid_caps->audio_modes[i].channel_count;
5923 			audio_info->modes[i].sample_rates.all =
5924 					edid_caps->audio_modes[i].sample_rate;
5925 			audio_info->modes[i].sample_size =
5926 					edid_caps->audio_modes[i].sample_size;
5927 		}
5928 	}
5929 
5930 	audio_info->flags.all = edid_caps->speaker_flags;
5931 
5932 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5933 	if (drm_connector->latency_present[0]) {
5934 		audio_info->video_latency = drm_connector->video_latency[0];
5935 		audio_info->audio_latency = drm_connector->audio_latency[0];
5936 	}
5937 
5938 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5939 
5940 }
5941 
5942 static void
5943 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5944 				      struct drm_display_mode *dst_mode)
5945 {
5946 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5947 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5948 	dst_mode->crtc_clock = src_mode->crtc_clock;
5949 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5950 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5951 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5952 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5953 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5954 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5955 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5956 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5957 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5958 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5959 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5960 }
5961 
5962 static void
5963 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5964 					const struct drm_display_mode *native_mode,
5965 					bool scale_enabled)
5966 {
5967 	if (scale_enabled) {
5968 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5969 	} else if (native_mode->clock == drm_mode->clock &&
5970 			native_mode->htotal == drm_mode->htotal &&
5971 			native_mode->vtotal == drm_mode->vtotal) {
5972 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5973 	} else {
5974 		/* no scaling nor amdgpu inserted, no need to patch */
5975 	}
5976 }
5977 
5978 static struct dc_sink *
5979 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5980 {
5981 	struct dc_sink_init_data sink_init_data = { 0 };
5982 	struct dc_sink *sink = NULL;
5983 	sink_init_data.link = aconnector->dc_link;
5984 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5985 
5986 	sink = dc_sink_create(&sink_init_data);
5987 	if (!sink) {
5988 		DRM_ERROR("Failed to create sink!\n");
5989 		return NULL;
5990 	}
5991 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5992 
5993 	return sink;
5994 }
5995 
5996 static void set_multisync_trigger_params(
5997 		struct dc_stream_state *stream)
5998 {
5999 	struct dc_stream_state *master = NULL;
6000 
6001 	if (stream->triggered_crtc_reset.enabled) {
6002 		master = stream->triggered_crtc_reset.event_source;
6003 		stream->triggered_crtc_reset.event =
6004 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6005 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6006 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6007 	}
6008 }
6009 
6010 static void set_master_stream(struct dc_stream_state *stream_set[],
6011 			      int stream_count)
6012 {
6013 	int j, highest_rfr = 0, master_stream = 0;
6014 
6015 	for (j = 0;  j < stream_count; j++) {
6016 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6017 			int refresh_rate = 0;
6018 
6019 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6020 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6021 			if (refresh_rate > highest_rfr) {
6022 				highest_rfr = refresh_rate;
6023 				master_stream = j;
6024 			}
6025 		}
6026 	}
6027 	for (j = 0;  j < stream_count; j++) {
6028 		if (stream_set[j])
6029 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6030 	}
6031 }
6032 
6033 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6034 {
6035 	int i = 0;
6036 	struct dc_stream_state *stream;
6037 
6038 	if (context->stream_count < 2)
6039 		return;
6040 	for (i = 0; i < context->stream_count ; i++) {
6041 		if (!context->streams[i])
6042 			continue;
6043 		/*
6044 		 * TODO: add a function to read AMD VSDB bits and set
6045 		 * crtc_sync_master.multi_sync_enabled flag
6046 		 * For now it's set to false
6047 		 */
6048 	}
6049 
6050 	set_master_stream(context->streams, context->stream_count);
6051 
6052 	for (i = 0; i < context->stream_count ; i++) {
6053 		stream = context->streams[i];
6054 
6055 		if (!stream)
6056 			continue;
6057 
6058 		set_multisync_trigger_params(stream);
6059 	}
6060 }
6061 
6062 #if defined(CONFIG_DRM_AMD_DC_DCN)
6063 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6064 							struct dc_sink *sink, struct dc_stream_state *stream,
6065 							struct dsc_dec_dpcd_caps *dsc_caps)
6066 {
6067 	stream->timing.flags.DSC = 0;
6068 
6069 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6070 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6071 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6072 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6073 				      dsc_caps);
6074 	}
6075 }
6076 
6077 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6078 										struct dc_sink *sink, struct dc_stream_state *stream,
6079 										struct dsc_dec_dpcd_caps *dsc_caps)
6080 {
6081 	struct drm_connector *drm_connector = &aconnector->base;
6082 	uint32_t link_bandwidth_kbps;
6083 	uint32_t max_dsc_target_bpp_limit_override = 0;
6084 
6085 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6086 							dc_link_get_link_cap(aconnector->dc_link));
6087 
6088 	if (stream->link && stream->link->local_sink)
6089 		max_dsc_target_bpp_limit_override =
6090 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6091 
6092 	/* Set DSC policy according to dsc_clock_en */
6093 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6094 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6095 
6096 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6097 
6098 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6099 						dsc_caps,
6100 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6101 						max_dsc_target_bpp_limit_override,
6102 						link_bandwidth_kbps,
6103 						&stream->timing,
6104 						&stream->timing.dsc_cfg)) {
6105 			stream->timing.flags.DSC = 1;
6106 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6107 		}
6108 	}
6109 
6110 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6111 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6112 		stream->timing.flags.DSC = 1;
6113 
6114 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6115 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6116 
6117 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6118 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6119 
6120 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6121 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6122 }
6123 #endif /* CONFIG_DRM_AMD_DC_DCN */
6124 
6125 /**
6126  * DOC: FreeSync Video
6127  *
6128  * When a userspace application wants to play a video, the content follows a
6129  * standard format definition that usually specifies the FPS for that format.
6130  * The below list illustrates some video format and the expected FPS,
6131  * respectively:
6132  *
6133  * - TV/NTSC (23.976 FPS)
6134  * - Cinema (24 FPS)
6135  * - TV/PAL (25 FPS)
6136  * - TV/NTSC (29.97 FPS)
6137  * - TV/NTSC (30 FPS)
6138  * - Cinema HFR (48 FPS)
6139  * - TV/PAL (50 FPS)
6140  * - Commonly used (60 FPS)
6141  * - Multiples of 24 (48,72,96,120 FPS)
6142  *
6143  * The list of standards video format is not huge and can be added to the
6144  * connector modeset list beforehand. With that, userspace can leverage
6145  * FreeSync to extends the front porch in order to attain the target refresh
6146  * rate. Such a switch will happen seamlessly, without screen blanking or
6147  * reprogramming of the output in any other way. If the userspace requests a
6148  * modesetting change compatible with FreeSync modes that only differ in the
6149  * refresh rate, DC will skip the full update and avoid blink during the
6150  * transition. For example, the video player can change the modesetting from
6151  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6152  * causing any display blink. This same concept can be applied to a mode
6153  * setting change.
6154  */
6155 static struct drm_display_mode *
6156 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6157 			  bool use_probed_modes)
6158 {
6159 	struct drm_display_mode *m, *m_pref = NULL;
6160 	u16 current_refresh, highest_refresh;
6161 	struct list_head *list_head = use_probed_modes ?
6162 						    &aconnector->base.probed_modes :
6163 						    &aconnector->base.modes;
6164 
6165 	if (aconnector->freesync_vid_base.clock != 0)
6166 		return &aconnector->freesync_vid_base;
6167 
6168 	/* Find the preferred mode */
6169 	list_for_each_entry (m, list_head, head) {
6170 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6171 			m_pref = m;
6172 			break;
6173 		}
6174 	}
6175 
6176 	if (!m_pref) {
6177 		/* Probably an EDID with no preferred mode. Fallback to first entry */
6178 		m_pref = list_first_entry_or_null(
6179 			&aconnector->base.modes, struct drm_display_mode, head);
6180 		if (!m_pref) {
6181 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6182 			return NULL;
6183 		}
6184 	}
6185 
6186 	highest_refresh = drm_mode_vrefresh(m_pref);
6187 
6188 	/*
6189 	 * Find the mode with highest refresh rate with same resolution.
6190 	 * For some monitors, preferred mode is not the mode with highest
6191 	 * supported refresh rate.
6192 	 */
6193 	list_for_each_entry (m, list_head, head) {
6194 		current_refresh  = drm_mode_vrefresh(m);
6195 
6196 		if (m->hdisplay == m_pref->hdisplay &&
6197 		    m->vdisplay == m_pref->vdisplay &&
6198 		    highest_refresh < current_refresh) {
6199 			highest_refresh = current_refresh;
6200 			m_pref = m;
6201 		}
6202 	}
6203 
6204 	aconnector->freesync_vid_base = *m_pref;
6205 	return m_pref;
6206 }
6207 
6208 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6209 				   struct amdgpu_dm_connector *aconnector)
6210 {
6211 	struct drm_display_mode *high_mode;
6212 	int timing_diff;
6213 
6214 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6215 	if (!high_mode || !mode)
6216 		return false;
6217 
6218 	timing_diff = high_mode->vtotal - mode->vtotal;
6219 
6220 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6221 	    high_mode->hdisplay != mode->hdisplay ||
6222 	    high_mode->vdisplay != mode->vdisplay ||
6223 	    high_mode->hsync_start != mode->hsync_start ||
6224 	    high_mode->hsync_end != mode->hsync_end ||
6225 	    high_mode->htotal != mode->htotal ||
6226 	    high_mode->hskew != mode->hskew ||
6227 	    high_mode->vscan != mode->vscan ||
6228 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6229 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6230 		return false;
6231 	else
6232 		return true;
6233 }
6234 
6235 static struct dc_stream_state *
6236 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6237 		       const struct drm_display_mode *drm_mode,
6238 		       const struct dm_connector_state *dm_state,
6239 		       const struct dc_stream_state *old_stream,
6240 		       int requested_bpc)
6241 {
6242 	struct drm_display_mode *preferred_mode = NULL;
6243 	struct drm_connector *drm_connector;
6244 	const struct drm_connector_state *con_state =
6245 		dm_state ? &dm_state->base : NULL;
6246 	struct dc_stream_state *stream = NULL;
6247 	struct drm_display_mode mode = *drm_mode;
6248 	struct drm_display_mode saved_mode;
6249 	struct drm_display_mode *freesync_mode = NULL;
6250 	bool native_mode_found = false;
6251 	bool recalculate_timing = false;
6252 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6253 	int mode_refresh;
6254 	int preferred_refresh = 0;
6255 #if defined(CONFIG_DRM_AMD_DC_DCN)
6256 	struct dsc_dec_dpcd_caps dsc_caps;
6257 #endif
6258 	struct dc_sink *sink = NULL;
6259 
6260 	memset(&saved_mode, 0, sizeof(saved_mode));
6261 
6262 	if (aconnector == NULL) {
6263 		DRM_ERROR("aconnector is NULL!\n");
6264 		return stream;
6265 	}
6266 
6267 	drm_connector = &aconnector->base;
6268 
6269 	if (!aconnector->dc_sink) {
6270 		sink = create_fake_sink(aconnector);
6271 		if (!sink)
6272 			return stream;
6273 	} else {
6274 		sink = aconnector->dc_sink;
6275 		dc_sink_retain(sink);
6276 	}
6277 
6278 	stream = dc_create_stream_for_sink(sink);
6279 
6280 	if (stream == NULL) {
6281 		DRM_ERROR("Failed to create stream for sink!\n");
6282 		goto finish;
6283 	}
6284 
6285 	stream->dm_stream_context = aconnector;
6286 
6287 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6288 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6289 
6290 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6291 		/* Search for preferred mode */
6292 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6293 			native_mode_found = true;
6294 			break;
6295 		}
6296 	}
6297 	if (!native_mode_found)
6298 		preferred_mode = list_first_entry_or_null(
6299 				&aconnector->base.modes,
6300 				struct drm_display_mode,
6301 				head);
6302 
6303 	mode_refresh = drm_mode_vrefresh(&mode);
6304 
6305 	if (preferred_mode == NULL) {
6306 		/*
6307 		 * This may not be an error, the use case is when we have no
6308 		 * usermode calls to reset and set mode upon hotplug. In this
6309 		 * case, we call set mode ourselves to restore the previous mode
6310 		 * and the modelist may not be filled in in time.
6311 		 */
6312 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6313 	} else {
6314 		recalculate_timing = amdgpu_freesync_vid_mode &&
6315 				 is_freesync_video_mode(&mode, aconnector);
6316 		if (recalculate_timing) {
6317 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6318 			saved_mode = mode;
6319 			mode = *freesync_mode;
6320 		} else {
6321 			decide_crtc_timing_for_drm_display_mode(
6322 				&mode, preferred_mode, scale);
6323 
6324 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6325 		}
6326 	}
6327 
6328 	if (recalculate_timing)
6329 		drm_mode_set_crtcinfo(&saved_mode, 0);
6330 	else if (!dm_state)
6331 		drm_mode_set_crtcinfo(&mode, 0);
6332 
6333        /*
6334 	* If scaling is enabled and refresh rate didn't change
6335 	* we copy the vic and polarities of the old timings
6336 	*/
6337 	if (!scale || mode_refresh != preferred_refresh)
6338 		fill_stream_properties_from_drm_display_mode(
6339 			stream, &mode, &aconnector->base, con_state, NULL,
6340 			requested_bpc);
6341 	else
6342 		fill_stream_properties_from_drm_display_mode(
6343 			stream, &mode, &aconnector->base, con_state, old_stream,
6344 			requested_bpc);
6345 
6346 #if defined(CONFIG_DRM_AMD_DC_DCN)
6347 	/* SST DSC determination policy */
6348 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6349 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6350 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6351 #endif
6352 
6353 	update_stream_scaling_settings(&mode, dm_state, stream);
6354 
6355 	fill_audio_info(
6356 		&stream->audio_info,
6357 		drm_connector,
6358 		sink);
6359 
6360 	update_stream_signal(stream, sink);
6361 
6362 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6363 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6364 
6365 	if (stream->link->psr_settings.psr_feature_enabled) {
6366 		//
6367 		// should decide stream support vsc sdp colorimetry capability
6368 		// before building vsc info packet
6369 		//
6370 		stream->use_vsc_sdp_for_colorimetry = false;
6371 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6372 			stream->use_vsc_sdp_for_colorimetry =
6373 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6374 		} else {
6375 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6376 				stream->use_vsc_sdp_for_colorimetry = true;
6377 		}
6378 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6379 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6380 
6381 	}
6382 finish:
6383 	dc_sink_release(sink);
6384 
6385 	return stream;
6386 }
6387 
6388 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6389 {
6390 	drm_crtc_cleanup(crtc);
6391 	kfree(crtc);
6392 }
6393 
6394 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6395 				  struct drm_crtc_state *state)
6396 {
6397 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6398 
6399 	/* TODO Destroy dc_stream objects are stream object is flattened */
6400 	if (cur->stream)
6401 		dc_stream_release(cur->stream);
6402 
6403 
6404 	__drm_atomic_helper_crtc_destroy_state(state);
6405 
6406 
6407 	kfree(state);
6408 }
6409 
6410 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6411 {
6412 	struct dm_crtc_state *state;
6413 
6414 	if (crtc->state)
6415 		dm_crtc_destroy_state(crtc, crtc->state);
6416 
6417 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6418 	if (WARN_ON(!state))
6419 		return;
6420 
6421 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6422 }
6423 
6424 static struct drm_crtc_state *
6425 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6426 {
6427 	struct dm_crtc_state *state, *cur;
6428 
6429 	cur = to_dm_crtc_state(crtc->state);
6430 
6431 	if (WARN_ON(!crtc->state))
6432 		return NULL;
6433 
6434 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6435 	if (!state)
6436 		return NULL;
6437 
6438 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6439 
6440 	if (cur->stream) {
6441 		state->stream = cur->stream;
6442 		dc_stream_retain(state->stream);
6443 	}
6444 
6445 	state->active_planes = cur->active_planes;
6446 	state->vrr_infopacket = cur->vrr_infopacket;
6447 	state->abm_level = cur->abm_level;
6448 	state->vrr_supported = cur->vrr_supported;
6449 	state->freesync_config = cur->freesync_config;
6450 	state->cm_has_degamma = cur->cm_has_degamma;
6451 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6452 	state->force_dpms_off = cur->force_dpms_off;
6453 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
6454 
6455 	return &state->base;
6456 }
6457 
6458 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6459 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6460 {
6461 	crtc_debugfs_init(crtc);
6462 
6463 	return 0;
6464 }
6465 #endif
6466 
6467 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6468 {
6469 	enum dc_irq_source irq_source;
6470 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6471 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6472 	int rc;
6473 
6474 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6475 
6476 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6477 
6478 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6479 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6480 	return rc;
6481 }
6482 
6483 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6484 {
6485 	enum dc_irq_source irq_source;
6486 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6487 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6488 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6489 #if defined(CONFIG_DRM_AMD_DC_DCN)
6490 	struct amdgpu_display_manager *dm = &adev->dm;
6491 	struct vblank_control_work *work;
6492 #endif
6493 	int rc = 0;
6494 
6495 	if (enable) {
6496 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6497 		if (amdgpu_dm_vrr_active(acrtc_state))
6498 			rc = dm_set_vupdate_irq(crtc, true);
6499 	} else {
6500 		/* vblank irq off -> vupdate irq off */
6501 		rc = dm_set_vupdate_irq(crtc, false);
6502 	}
6503 
6504 	if (rc)
6505 		return rc;
6506 
6507 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6508 
6509 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6510 		return -EBUSY;
6511 
6512 	if (amdgpu_in_reset(adev))
6513 		return 0;
6514 
6515 #if defined(CONFIG_DRM_AMD_DC_DCN)
6516 	if (dm->vblank_control_workqueue) {
6517 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6518 		if (!work)
6519 			return -ENOMEM;
6520 
6521 		INIT_WORK(&work->work, vblank_control_worker);
6522 		work->dm = dm;
6523 		work->acrtc = acrtc;
6524 		work->enable = enable;
6525 
6526 		if (acrtc_state->stream) {
6527 			dc_stream_retain(acrtc_state->stream);
6528 			work->stream = acrtc_state->stream;
6529 		}
6530 
6531 		queue_work(dm->vblank_control_workqueue, &work->work);
6532 	}
6533 #endif
6534 
6535 	return 0;
6536 }
6537 
6538 static int dm_enable_vblank(struct drm_crtc *crtc)
6539 {
6540 	return dm_set_vblank(crtc, true);
6541 }
6542 
6543 static void dm_disable_vblank(struct drm_crtc *crtc)
6544 {
6545 	dm_set_vblank(crtc, false);
6546 }
6547 
6548 /* Implemented only the options currently availible for the driver */
6549 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6550 	.reset = dm_crtc_reset_state,
6551 	.destroy = amdgpu_dm_crtc_destroy,
6552 	.set_config = drm_atomic_helper_set_config,
6553 	.page_flip = drm_atomic_helper_page_flip,
6554 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6555 	.atomic_destroy_state = dm_crtc_destroy_state,
6556 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6557 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6558 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6559 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6560 	.enable_vblank = dm_enable_vblank,
6561 	.disable_vblank = dm_disable_vblank,
6562 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6563 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6564 	.late_register = amdgpu_dm_crtc_late_register,
6565 #endif
6566 };
6567 
6568 static enum drm_connector_status
6569 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6570 {
6571 	bool connected;
6572 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6573 
6574 	/*
6575 	 * Notes:
6576 	 * 1. This interface is NOT called in context of HPD irq.
6577 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6578 	 * makes it a bad place for *any* MST-related activity.
6579 	 */
6580 
6581 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6582 	    !aconnector->fake_enable)
6583 		connected = (aconnector->dc_sink != NULL);
6584 	else
6585 		connected = (aconnector->base.force == DRM_FORCE_ON);
6586 
6587 	update_subconnector_property(aconnector);
6588 
6589 	return (connected ? connector_status_connected :
6590 			connector_status_disconnected);
6591 }
6592 
6593 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6594 					    struct drm_connector_state *connector_state,
6595 					    struct drm_property *property,
6596 					    uint64_t val)
6597 {
6598 	struct drm_device *dev = connector->dev;
6599 	struct amdgpu_device *adev = drm_to_adev(dev);
6600 	struct dm_connector_state *dm_old_state =
6601 		to_dm_connector_state(connector->state);
6602 	struct dm_connector_state *dm_new_state =
6603 		to_dm_connector_state(connector_state);
6604 
6605 	int ret = -EINVAL;
6606 
6607 	if (property == dev->mode_config.scaling_mode_property) {
6608 		enum amdgpu_rmx_type rmx_type;
6609 
6610 		switch (val) {
6611 		case DRM_MODE_SCALE_CENTER:
6612 			rmx_type = RMX_CENTER;
6613 			break;
6614 		case DRM_MODE_SCALE_ASPECT:
6615 			rmx_type = RMX_ASPECT;
6616 			break;
6617 		case DRM_MODE_SCALE_FULLSCREEN:
6618 			rmx_type = RMX_FULL;
6619 			break;
6620 		case DRM_MODE_SCALE_NONE:
6621 		default:
6622 			rmx_type = RMX_OFF;
6623 			break;
6624 		}
6625 
6626 		if (dm_old_state->scaling == rmx_type)
6627 			return 0;
6628 
6629 		dm_new_state->scaling = rmx_type;
6630 		ret = 0;
6631 	} else if (property == adev->mode_info.underscan_hborder_property) {
6632 		dm_new_state->underscan_hborder = val;
6633 		ret = 0;
6634 	} else if (property == adev->mode_info.underscan_vborder_property) {
6635 		dm_new_state->underscan_vborder = val;
6636 		ret = 0;
6637 	} else if (property == adev->mode_info.underscan_property) {
6638 		dm_new_state->underscan_enable = val;
6639 		ret = 0;
6640 	} else if (property == adev->mode_info.abm_level_property) {
6641 		dm_new_state->abm_level = val;
6642 		ret = 0;
6643 	}
6644 
6645 	return ret;
6646 }
6647 
6648 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6649 					    const struct drm_connector_state *state,
6650 					    struct drm_property *property,
6651 					    uint64_t *val)
6652 {
6653 	struct drm_device *dev = connector->dev;
6654 	struct amdgpu_device *adev = drm_to_adev(dev);
6655 	struct dm_connector_state *dm_state =
6656 		to_dm_connector_state(state);
6657 	int ret = -EINVAL;
6658 
6659 	if (property == dev->mode_config.scaling_mode_property) {
6660 		switch (dm_state->scaling) {
6661 		case RMX_CENTER:
6662 			*val = DRM_MODE_SCALE_CENTER;
6663 			break;
6664 		case RMX_ASPECT:
6665 			*val = DRM_MODE_SCALE_ASPECT;
6666 			break;
6667 		case RMX_FULL:
6668 			*val = DRM_MODE_SCALE_FULLSCREEN;
6669 			break;
6670 		case RMX_OFF:
6671 		default:
6672 			*val = DRM_MODE_SCALE_NONE;
6673 			break;
6674 		}
6675 		ret = 0;
6676 	} else if (property == adev->mode_info.underscan_hborder_property) {
6677 		*val = dm_state->underscan_hborder;
6678 		ret = 0;
6679 	} else if (property == adev->mode_info.underscan_vborder_property) {
6680 		*val = dm_state->underscan_vborder;
6681 		ret = 0;
6682 	} else if (property == adev->mode_info.underscan_property) {
6683 		*val = dm_state->underscan_enable;
6684 		ret = 0;
6685 	} else if (property == adev->mode_info.abm_level_property) {
6686 		*val = dm_state->abm_level;
6687 		ret = 0;
6688 	}
6689 
6690 	return ret;
6691 }
6692 
6693 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6694 {
6695 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6696 
6697 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6698 }
6699 
6700 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6701 {
6702 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6703 	const struct dc_link *link = aconnector->dc_link;
6704 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6705 	struct amdgpu_display_manager *dm = &adev->dm;
6706 	int i;
6707 
6708 	/*
6709 	 * Call only if mst_mgr was iniitalized before since it's not done
6710 	 * for all connector types.
6711 	 */
6712 	if (aconnector->mst_mgr.dev)
6713 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6714 
6715 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6716 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6717 	for (i = 0; i < dm->num_of_edps; i++) {
6718 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6719 			backlight_device_unregister(dm->backlight_dev[i]);
6720 			dm->backlight_dev[i] = NULL;
6721 		}
6722 	}
6723 #endif
6724 
6725 	if (aconnector->dc_em_sink)
6726 		dc_sink_release(aconnector->dc_em_sink);
6727 	aconnector->dc_em_sink = NULL;
6728 	if (aconnector->dc_sink)
6729 		dc_sink_release(aconnector->dc_sink);
6730 	aconnector->dc_sink = NULL;
6731 
6732 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6733 	drm_connector_unregister(connector);
6734 	drm_connector_cleanup(connector);
6735 	if (aconnector->i2c) {
6736 		i2c_del_adapter(&aconnector->i2c->base);
6737 		kfree(aconnector->i2c);
6738 	}
6739 	kfree(aconnector->dm_dp_aux.aux.name);
6740 
6741 	kfree(connector);
6742 }
6743 
6744 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6745 {
6746 	struct dm_connector_state *state =
6747 		to_dm_connector_state(connector->state);
6748 
6749 	if (connector->state)
6750 		__drm_atomic_helper_connector_destroy_state(connector->state);
6751 
6752 	kfree(state);
6753 
6754 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6755 
6756 	if (state) {
6757 		state->scaling = RMX_OFF;
6758 		state->underscan_enable = false;
6759 		state->underscan_hborder = 0;
6760 		state->underscan_vborder = 0;
6761 		state->base.max_requested_bpc = 8;
6762 		state->vcpi_slots = 0;
6763 		state->pbn = 0;
6764 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6765 			state->abm_level = amdgpu_dm_abm_level;
6766 
6767 		__drm_atomic_helper_connector_reset(connector, &state->base);
6768 	}
6769 }
6770 
6771 struct drm_connector_state *
6772 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6773 {
6774 	struct dm_connector_state *state =
6775 		to_dm_connector_state(connector->state);
6776 
6777 	struct dm_connector_state *new_state =
6778 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6779 
6780 	if (!new_state)
6781 		return NULL;
6782 
6783 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6784 
6785 	new_state->freesync_capable = state->freesync_capable;
6786 	new_state->abm_level = state->abm_level;
6787 	new_state->scaling = state->scaling;
6788 	new_state->underscan_enable = state->underscan_enable;
6789 	new_state->underscan_hborder = state->underscan_hborder;
6790 	new_state->underscan_vborder = state->underscan_vborder;
6791 	new_state->vcpi_slots = state->vcpi_slots;
6792 	new_state->pbn = state->pbn;
6793 	return &new_state->base;
6794 }
6795 
6796 static int
6797 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6798 {
6799 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6800 		to_amdgpu_dm_connector(connector);
6801 	int r;
6802 
6803 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6804 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6805 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6806 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6807 		if (r)
6808 			return r;
6809 	}
6810 
6811 #if defined(CONFIG_DEBUG_FS)
6812 	connector_debugfs_init(amdgpu_dm_connector);
6813 #endif
6814 
6815 	return 0;
6816 }
6817 
6818 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6819 	.reset = amdgpu_dm_connector_funcs_reset,
6820 	.detect = amdgpu_dm_connector_detect,
6821 	.fill_modes = drm_helper_probe_single_connector_modes,
6822 	.destroy = amdgpu_dm_connector_destroy,
6823 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6824 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6825 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6826 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6827 	.late_register = amdgpu_dm_connector_late_register,
6828 	.early_unregister = amdgpu_dm_connector_unregister
6829 };
6830 
6831 static int get_modes(struct drm_connector *connector)
6832 {
6833 	return amdgpu_dm_connector_get_modes(connector);
6834 }
6835 
6836 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6837 {
6838 	struct dc_sink_init_data init_params = {
6839 			.link = aconnector->dc_link,
6840 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6841 	};
6842 	struct edid *edid;
6843 
6844 	if (!aconnector->base.edid_blob_ptr) {
6845 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6846 				aconnector->base.name);
6847 
6848 		aconnector->base.force = DRM_FORCE_OFF;
6849 		aconnector->base.override_edid = false;
6850 		return;
6851 	}
6852 
6853 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6854 
6855 	aconnector->edid = edid;
6856 
6857 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6858 		aconnector->dc_link,
6859 		(uint8_t *)edid,
6860 		(edid->extensions + 1) * EDID_LENGTH,
6861 		&init_params);
6862 
6863 	if (aconnector->base.force == DRM_FORCE_ON) {
6864 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6865 		aconnector->dc_link->local_sink :
6866 		aconnector->dc_em_sink;
6867 		dc_sink_retain(aconnector->dc_sink);
6868 	}
6869 }
6870 
6871 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6872 {
6873 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6874 
6875 	/*
6876 	 * In case of headless boot with force on for DP managed connector
6877 	 * Those settings have to be != 0 to get initial modeset
6878 	 */
6879 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6880 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6881 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6882 	}
6883 
6884 
6885 	aconnector->base.override_edid = true;
6886 	create_eml_sink(aconnector);
6887 }
6888 
6889 static struct dc_stream_state *
6890 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6891 				const struct drm_display_mode *drm_mode,
6892 				const struct dm_connector_state *dm_state,
6893 				const struct dc_stream_state *old_stream)
6894 {
6895 	struct drm_connector *connector = &aconnector->base;
6896 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6897 	struct dc_stream_state *stream;
6898 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6899 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6900 	enum dc_status dc_result = DC_OK;
6901 
6902 	do {
6903 		stream = create_stream_for_sink(aconnector, drm_mode,
6904 						dm_state, old_stream,
6905 						requested_bpc);
6906 		if (stream == NULL) {
6907 			DRM_ERROR("Failed to create stream for sink!\n");
6908 			break;
6909 		}
6910 
6911 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6912 
6913 		if (dc_result != DC_OK) {
6914 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6915 				      drm_mode->hdisplay,
6916 				      drm_mode->vdisplay,
6917 				      drm_mode->clock,
6918 				      dc_result,
6919 				      dc_status_to_str(dc_result));
6920 
6921 			dc_stream_release(stream);
6922 			stream = NULL;
6923 			requested_bpc -= 2; /* lower bpc to retry validation */
6924 		}
6925 
6926 	} while (stream == NULL && requested_bpc >= 6);
6927 
6928 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6929 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6930 
6931 		aconnector->force_yuv420_output = true;
6932 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6933 						dm_state, old_stream);
6934 		aconnector->force_yuv420_output = false;
6935 	}
6936 
6937 	return stream;
6938 }
6939 
6940 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6941 				   struct drm_display_mode *mode)
6942 {
6943 	int result = MODE_ERROR;
6944 	struct dc_sink *dc_sink;
6945 	/* TODO: Unhardcode stream count */
6946 	struct dc_stream_state *stream;
6947 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6948 
6949 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6950 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6951 		return result;
6952 
6953 	/*
6954 	 * Only run this the first time mode_valid is called to initilialize
6955 	 * EDID mgmt
6956 	 */
6957 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6958 		!aconnector->dc_em_sink)
6959 		handle_edid_mgmt(aconnector);
6960 
6961 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6962 
6963 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6964 				aconnector->base.force != DRM_FORCE_ON) {
6965 		DRM_ERROR("dc_sink is NULL!\n");
6966 		goto fail;
6967 	}
6968 
6969 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6970 	if (stream) {
6971 		dc_stream_release(stream);
6972 		result = MODE_OK;
6973 	}
6974 
6975 fail:
6976 	/* TODO: error handling*/
6977 	return result;
6978 }
6979 
6980 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6981 				struct dc_info_packet *out)
6982 {
6983 	struct hdmi_drm_infoframe frame;
6984 	unsigned char buf[30]; /* 26 + 4 */
6985 	ssize_t len;
6986 	int ret, i;
6987 
6988 	memset(out, 0, sizeof(*out));
6989 
6990 	if (!state->hdr_output_metadata)
6991 		return 0;
6992 
6993 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6994 	if (ret)
6995 		return ret;
6996 
6997 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6998 	if (len < 0)
6999 		return (int)len;
7000 
7001 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7002 	if (len != 30)
7003 		return -EINVAL;
7004 
7005 	/* Prepare the infopacket for DC. */
7006 	switch (state->connector->connector_type) {
7007 	case DRM_MODE_CONNECTOR_HDMIA:
7008 		out->hb0 = 0x87; /* type */
7009 		out->hb1 = 0x01; /* version */
7010 		out->hb2 = 0x1A; /* length */
7011 		out->sb[0] = buf[3]; /* checksum */
7012 		i = 1;
7013 		break;
7014 
7015 	case DRM_MODE_CONNECTOR_DisplayPort:
7016 	case DRM_MODE_CONNECTOR_eDP:
7017 		out->hb0 = 0x00; /* sdp id, zero */
7018 		out->hb1 = 0x87; /* type */
7019 		out->hb2 = 0x1D; /* payload len - 1 */
7020 		out->hb3 = (0x13 << 2); /* sdp version */
7021 		out->sb[0] = 0x01; /* version */
7022 		out->sb[1] = 0x1A; /* length */
7023 		i = 2;
7024 		break;
7025 
7026 	default:
7027 		return -EINVAL;
7028 	}
7029 
7030 	memcpy(&out->sb[i], &buf[4], 26);
7031 	out->valid = true;
7032 
7033 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7034 		       sizeof(out->sb), false);
7035 
7036 	return 0;
7037 }
7038 
7039 static int
7040 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7041 				 struct drm_atomic_state *state)
7042 {
7043 	struct drm_connector_state *new_con_state =
7044 		drm_atomic_get_new_connector_state(state, conn);
7045 	struct drm_connector_state *old_con_state =
7046 		drm_atomic_get_old_connector_state(state, conn);
7047 	struct drm_crtc *crtc = new_con_state->crtc;
7048 	struct drm_crtc_state *new_crtc_state;
7049 	int ret;
7050 
7051 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7052 
7053 	if (!crtc)
7054 		return 0;
7055 
7056 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7057 		struct dc_info_packet hdr_infopacket;
7058 
7059 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7060 		if (ret)
7061 			return ret;
7062 
7063 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7064 		if (IS_ERR(new_crtc_state))
7065 			return PTR_ERR(new_crtc_state);
7066 
7067 		/*
7068 		 * DC considers the stream backends changed if the
7069 		 * static metadata changes. Forcing the modeset also
7070 		 * gives a simple way for userspace to switch from
7071 		 * 8bpc to 10bpc when setting the metadata to enter
7072 		 * or exit HDR.
7073 		 *
7074 		 * Changing the static metadata after it's been
7075 		 * set is permissible, however. So only force a
7076 		 * modeset if we're entering or exiting HDR.
7077 		 */
7078 		new_crtc_state->mode_changed =
7079 			!old_con_state->hdr_output_metadata ||
7080 			!new_con_state->hdr_output_metadata;
7081 	}
7082 
7083 	return 0;
7084 }
7085 
7086 static const struct drm_connector_helper_funcs
7087 amdgpu_dm_connector_helper_funcs = {
7088 	/*
7089 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7090 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7091 	 * are missing after user start lightdm. So we need to renew modes list.
7092 	 * in get_modes call back, not just return the modes count
7093 	 */
7094 	.get_modes = get_modes,
7095 	.mode_valid = amdgpu_dm_connector_mode_valid,
7096 	.atomic_check = amdgpu_dm_connector_atomic_check,
7097 };
7098 
7099 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7100 {
7101 }
7102 
7103 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7104 {
7105 	struct drm_atomic_state *state = new_crtc_state->state;
7106 	struct drm_plane *plane;
7107 	int num_active = 0;
7108 
7109 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7110 		struct drm_plane_state *new_plane_state;
7111 
7112 		/* Cursor planes are "fake". */
7113 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7114 			continue;
7115 
7116 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7117 
7118 		if (!new_plane_state) {
7119 			/*
7120 			 * The plane is enable on the CRTC and hasn't changed
7121 			 * state. This means that it previously passed
7122 			 * validation and is therefore enabled.
7123 			 */
7124 			num_active += 1;
7125 			continue;
7126 		}
7127 
7128 		/* We need a framebuffer to be considered enabled. */
7129 		num_active += (new_plane_state->fb != NULL);
7130 	}
7131 
7132 	return num_active;
7133 }
7134 
7135 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7136 					 struct drm_crtc_state *new_crtc_state)
7137 {
7138 	struct dm_crtc_state *dm_new_crtc_state =
7139 		to_dm_crtc_state(new_crtc_state);
7140 
7141 	dm_new_crtc_state->active_planes = 0;
7142 
7143 	if (!dm_new_crtc_state->stream)
7144 		return;
7145 
7146 	dm_new_crtc_state->active_planes =
7147 		count_crtc_active_planes(new_crtc_state);
7148 }
7149 
7150 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7151 				       struct drm_atomic_state *state)
7152 {
7153 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7154 									  crtc);
7155 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7156 	struct dc *dc = adev->dm.dc;
7157 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7158 	int ret = -EINVAL;
7159 
7160 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7161 
7162 	dm_update_crtc_active_planes(crtc, crtc_state);
7163 
7164 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7165 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7166 		return ret;
7167 	}
7168 
7169 	/*
7170 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7171 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7172 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7173 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7174 	 */
7175 	if (crtc_state->enable &&
7176 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7177 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7178 		return -EINVAL;
7179 	}
7180 
7181 	/* In some use cases, like reset, no stream is attached */
7182 	if (!dm_crtc_state->stream)
7183 		return 0;
7184 
7185 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7186 		return 0;
7187 
7188 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7189 	return ret;
7190 }
7191 
7192 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7193 				      const struct drm_display_mode *mode,
7194 				      struct drm_display_mode *adjusted_mode)
7195 {
7196 	return true;
7197 }
7198 
7199 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7200 	.disable = dm_crtc_helper_disable,
7201 	.atomic_check = dm_crtc_helper_atomic_check,
7202 	.mode_fixup = dm_crtc_helper_mode_fixup,
7203 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7204 };
7205 
7206 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7207 {
7208 
7209 }
7210 
7211 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7212 {
7213 	switch (display_color_depth) {
7214 		case COLOR_DEPTH_666:
7215 			return 6;
7216 		case COLOR_DEPTH_888:
7217 			return 8;
7218 		case COLOR_DEPTH_101010:
7219 			return 10;
7220 		case COLOR_DEPTH_121212:
7221 			return 12;
7222 		case COLOR_DEPTH_141414:
7223 			return 14;
7224 		case COLOR_DEPTH_161616:
7225 			return 16;
7226 		default:
7227 			break;
7228 		}
7229 	return 0;
7230 }
7231 
7232 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7233 					  struct drm_crtc_state *crtc_state,
7234 					  struct drm_connector_state *conn_state)
7235 {
7236 	struct drm_atomic_state *state = crtc_state->state;
7237 	struct drm_connector *connector = conn_state->connector;
7238 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7239 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7240 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7241 	struct drm_dp_mst_topology_mgr *mst_mgr;
7242 	struct drm_dp_mst_port *mst_port;
7243 	enum dc_color_depth color_depth;
7244 	int clock, bpp = 0;
7245 	bool is_y420 = false;
7246 
7247 	if (!aconnector->port || !aconnector->dc_sink)
7248 		return 0;
7249 
7250 	mst_port = aconnector->port;
7251 	mst_mgr = &aconnector->mst_port->mst_mgr;
7252 
7253 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7254 		return 0;
7255 
7256 	if (!state->duplicated) {
7257 		int max_bpc = conn_state->max_requested_bpc;
7258 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7259 				aconnector->force_yuv420_output;
7260 		color_depth = convert_color_depth_from_display_info(connector,
7261 								    is_y420,
7262 								    max_bpc);
7263 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7264 		clock = adjusted_mode->clock;
7265 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7266 	}
7267 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7268 									   mst_mgr,
7269 									   mst_port,
7270 									   dm_new_connector_state->pbn,
7271 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7272 	if (dm_new_connector_state->vcpi_slots < 0) {
7273 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7274 		return dm_new_connector_state->vcpi_slots;
7275 	}
7276 	return 0;
7277 }
7278 
7279 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7280 	.disable = dm_encoder_helper_disable,
7281 	.atomic_check = dm_encoder_helper_atomic_check
7282 };
7283 
7284 #if defined(CONFIG_DRM_AMD_DC_DCN)
7285 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7286 					    struct dc_state *dc_state,
7287 					    struct dsc_mst_fairness_vars *vars)
7288 {
7289 	struct dc_stream_state *stream = NULL;
7290 	struct drm_connector *connector;
7291 	struct drm_connector_state *new_con_state;
7292 	struct amdgpu_dm_connector *aconnector;
7293 	struct dm_connector_state *dm_conn_state;
7294 	int i, j;
7295 	int vcpi, pbn_div, pbn, slot_num = 0;
7296 
7297 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7298 
7299 		aconnector = to_amdgpu_dm_connector(connector);
7300 
7301 		if (!aconnector->port)
7302 			continue;
7303 
7304 		if (!new_con_state || !new_con_state->crtc)
7305 			continue;
7306 
7307 		dm_conn_state = to_dm_connector_state(new_con_state);
7308 
7309 		for (j = 0; j < dc_state->stream_count; j++) {
7310 			stream = dc_state->streams[j];
7311 			if (!stream)
7312 				continue;
7313 
7314 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7315 				break;
7316 
7317 			stream = NULL;
7318 		}
7319 
7320 		if (!stream)
7321 			continue;
7322 
7323 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7324 		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
7325 		for (j = 0; j < dc_state->stream_count; j++) {
7326 			if (vars[j].aconnector == aconnector) {
7327 				pbn = vars[j].pbn;
7328 				break;
7329 			}
7330 		}
7331 
7332 		if (j == dc_state->stream_count)
7333 			continue;
7334 
7335 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7336 
7337 		if (stream->timing.flags.DSC != 1) {
7338 			dm_conn_state->pbn = pbn;
7339 			dm_conn_state->vcpi_slots = slot_num;
7340 
7341 			drm_dp_mst_atomic_enable_dsc(state,
7342 						     aconnector->port,
7343 						     dm_conn_state->pbn,
7344 						     0,
7345 						     false);
7346 			continue;
7347 		}
7348 
7349 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7350 						    aconnector->port,
7351 						    pbn, pbn_div,
7352 						    true);
7353 		if (vcpi < 0)
7354 			return vcpi;
7355 
7356 		dm_conn_state->pbn = pbn;
7357 		dm_conn_state->vcpi_slots = vcpi;
7358 	}
7359 	return 0;
7360 }
7361 #endif
7362 
7363 static void dm_drm_plane_reset(struct drm_plane *plane)
7364 {
7365 	struct dm_plane_state *amdgpu_state = NULL;
7366 
7367 	if (plane->state)
7368 		plane->funcs->atomic_destroy_state(plane, plane->state);
7369 
7370 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7371 	WARN_ON(amdgpu_state == NULL);
7372 
7373 	if (amdgpu_state)
7374 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7375 }
7376 
7377 static struct drm_plane_state *
7378 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7379 {
7380 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7381 
7382 	old_dm_plane_state = to_dm_plane_state(plane->state);
7383 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7384 	if (!dm_plane_state)
7385 		return NULL;
7386 
7387 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7388 
7389 	if (old_dm_plane_state->dc_state) {
7390 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7391 		dc_plane_state_retain(dm_plane_state->dc_state);
7392 	}
7393 
7394 	return &dm_plane_state->base;
7395 }
7396 
7397 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7398 				struct drm_plane_state *state)
7399 {
7400 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7401 
7402 	if (dm_plane_state->dc_state)
7403 		dc_plane_state_release(dm_plane_state->dc_state);
7404 
7405 	drm_atomic_helper_plane_destroy_state(plane, state);
7406 }
7407 
7408 static const struct drm_plane_funcs dm_plane_funcs = {
7409 	.update_plane	= drm_atomic_helper_update_plane,
7410 	.disable_plane	= drm_atomic_helper_disable_plane,
7411 	.destroy	= drm_primary_helper_destroy,
7412 	.reset = dm_drm_plane_reset,
7413 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7414 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7415 	.format_mod_supported = dm_plane_format_mod_supported,
7416 };
7417 
7418 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7419 				      struct drm_plane_state *new_state)
7420 {
7421 	struct amdgpu_framebuffer *afb;
7422 	struct drm_gem_object *obj;
7423 	struct amdgpu_device *adev;
7424 	struct amdgpu_bo *rbo;
7425 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7426 	struct list_head list;
7427 	struct ttm_validate_buffer tv;
7428 	struct ww_acquire_ctx ticket;
7429 	uint32_t domain;
7430 	int r;
7431 
7432 	if (!new_state->fb) {
7433 		DRM_DEBUG_KMS("No FB bound\n");
7434 		return 0;
7435 	}
7436 
7437 	afb = to_amdgpu_framebuffer(new_state->fb);
7438 	obj = new_state->fb->obj[0];
7439 	rbo = gem_to_amdgpu_bo(obj);
7440 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7441 	INIT_LIST_HEAD(&list);
7442 
7443 	tv.bo = &rbo->tbo;
7444 	tv.num_shared = 1;
7445 	list_add(&tv.head, &list);
7446 
7447 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7448 	if (r) {
7449 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7450 		return r;
7451 	}
7452 
7453 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7454 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7455 	else
7456 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7457 
7458 	r = amdgpu_bo_pin(rbo, domain);
7459 	if (unlikely(r != 0)) {
7460 		if (r != -ERESTARTSYS)
7461 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7462 		ttm_eu_backoff_reservation(&ticket, &list);
7463 		return r;
7464 	}
7465 
7466 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7467 	if (unlikely(r != 0)) {
7468 		amdgpu_bo_unpin(rbo);
7469 		ttm_eu_backoff_reservation(&ticket, &list);
7470 		DRM_ERROR("%p bind failed\n", rbo);
7471 		return r;
7472 	}
7473 
7474 	ttm_eu_backoff_reservation(&ticket, &list);
7475 
7476 	afb->address = amdgpu_bo_gpu_offset(rbo);
7477 
7478 	amdgpu_bo_ref(rbo);
7479 
7480 	/**
7481 	 * We don't do surface updates on planes that have been newly created,
7482 	 * but we also don't have the afb->address during atomic check.
7483 	 *
7484 	 * Fill in buffer attributes depending on the address here, but only on
7485 	 * newly created planes since they're not being used by DC yet and this
7486 	 * won't modify global state.
7487 	 */
7488 	dm_plane_state_old = to_dm_plane_state(plane->state);
7489 	dm_plane_state_new = to_dm_plane_state(new_state);
7490 
7491 	if (dm_plane_state_new->dc_state &&
7492 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7493 		struct dc_plane_state *plane_state =
7494 			dm_plane_state_new->dc_state;
7495 		bool force_disable_dcc = !plane_state->dcc.enable;
7496 
7497 		fill_plane_buffer_attributes(
7498 			adev, afb, plane_state->format, plane_state->rotation,
7499 			afb->tiling_flags,
7500 			&plane_state->tiling_info, &plane_state->plane_size,
7501 			&plane_state->dcc, &plane_state->address,
7502 			afb->tmz_surface, force_disable_dcc);
7503 	}
7504 
7505 	return 0;
7506 }
7507 
7508 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7509 				       struct drm_plane_state *old_state)
7510 {
7511 	struct amdgpu_bo *rbo;
7512 	int r;
7513 
7514 	if (!old_state->fb)
7515 		return;
7516 
7517 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7518 	r = amdgpu_bo_reserve(rbo, false);
7519 	if (unlikely(r)) {
7520 		DRM_ERROR("failed to reserve rbo before unpin\n");
7521 		return;
7522 	}
7523 
7524 	amdgpu_bo_unpin(rbo);
7525 	amdgpu_bo_unreserve(rbo);
7526 	amdgpu_bo_unref(&rbo);
7527 }
7528 
7529 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7530 				       struct drm_crtc_state *new_crtc_state)
7531 {
7532 	struct drm_framebuffer *fb = state->fb;
7533 	int min_downscale, max_upscale;
7534 	int min_scale = 0;
7535 	int max_scale = INT_MAX;
7536 
7537 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7538 	if (fb && state->crtc) {
7539 		/* Validate viewport to cover the case when only the position changes */
7540 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7541 			int viewport_width = state->crtc_w;
7542 			int viewport_height = state->crtc_h;
7543 
7544 			if (state->crtc_x < 0)
7545 				viewport_width += state->crtc_x;
7546 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7547 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7548 
7549 			if (state->crtc_y < 0)
7550 				viewport_height += state->crtc_y;
7551 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7552 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7553 
7554 			if (viewport_width < 0 || viewport_height < 0) {
7555 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7556 				return -EINVAL;
7557 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7558 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7559 				return -EINVAL;
7560 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7561 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7562 				return -EINVAL;
7563 			}
7564 
7565 		}
7566 
7567 		/* Get min/max allowed scaling factors from plane caps. */
7568 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7569 					     &min_downscale, &max_upscale);
7570 		/*
7571 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7572 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7573 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7574 		 */
7575 		min_scale = (1000 << 16) / max_upscale;
7576 		max_scale = (1000 << 16) / min_downscale;
7577 	}
7578 
7579 	return drm_atomic_helper_check_plane_state(
7580 		state, new_crtc_state, min_scale, max_scale, true, true);
7581 }
7582 
7583 static int dm_plane_atomic_check(struct drm_plane *plane,
7584 				 struct drm_atomic_state *state)
7585 {
7586 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7587 										 plane);
7588 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7589 	struct dc *dc = adev->dm.dc;
7590 	struct dm_plane_state *dm_plane_state;
7591 	struct dc_scaling_info scaling_info;
7592 	struct drm_crtc_state *new_crtc_state;
7593 	int ret;
7594 
7595 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7596 
7597 	dm_plane_state = to_dm_plane_state(new_plane_state);
7598 
7599 	if (!dm_plane_state->dc_state)
7600 		return 0;
7601 
7602 	new_crtc_state =
7603 		drm_atomic_get_new_crtc_state(state,
7604 					      new_plane_state->crtc);
7605 	if (!new_crtc_state)
7606 		return -EINVAL;
7607 
7608 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7609 	if (ret)
7610 		return ret;
7611 
7612 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7613 	if (ret)
7614 		return ret;
7615 
7616 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7617 		return 0;
7618 
7619 	return -EINVAL;
7620 }
7621 
7622 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7623 				       struct drm_atomic_state *state)
7624 {
7625 	/* Only support async updates on cursor planes. */
7626 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7627 		return -EINVAL;
7628 
7629 	return 0;
7630 }
7631 
7632 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7633 					 struct drm_atomic_state *state)
7634 {
7635 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7636 									   plane);
7637 	struct drm_plane_state *old_state =
7638 		drm_atomic_get_old_plane_state(state, plane);
7639 
7640 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7641 
7642 	swap(plane->state->fb, new_state->fb);
7643 
7644 	plane->state->src_x = new_state->src_x;
7645 	plane->state->src_y = new_state->src_y;
7646 	plane->state->src_w = new_state->src_w;
7647 	plane->state->src_h = new_state->src_h;
7648 	plane->state->crtc_x = new_state->crtc_x;
7649 	plane->state->crtc_y = new_state->crtc_y;
7650 	plane->state->crtc_w = new_state->crtc_w;
7651 	plane->state->crtc_h = new_state->crtc_h;
7652 
7653 	handle_cursor_update(plane, old_state);
7654 }
7655 
7656 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7657 	.prepare_fb = dm_plane_helper_prepare_fb,
7658 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7659 	.atomic_check = dm_plane_atomic_check,
7660 	.atomic_async_check = dm_plane_atomic_async_check,
7661 	.atomic_async_update = dm_plane_atomic_async_update
7662 };
7663 
7664 /*
7665  * TODO: these are currently initialized to rgb formats only.
7666  * For future use cases we should either initialize them dynamically based on
7667  * plane capabilities, or initialize this array to all formats, so internal drm
7668  * check will succeed, and let DC implement proper check
7669  */
7670 static const uint32_t rgb_formats[] = {
7671 	DRM_FORMAT_XRGB8888,
7672 	DRM_FORMAT_ARGB8888,
7673 	DRM_FORMAT_RGBA8888,
7674 	DRM_FORMAT_XRGB2101010,
7675 	DRM_FORMAT_XBGR2101010,
7676 	DRM_FORMAT_ARGB2101010,
7677 	DRM_FORMAT_ABGR2101010,
7678 	DRM_FORMAT_XRGB16161616,
7679 	DRM_FORMAT_XBGR16161616,
7680 	DRM_FORMAT_ARGB16161616,
7681 	DRM_FORMAT_ABGR16161616,
7682 	DRM_FORMAT_XBGR8888,
7683 	DRM_FORMAT_ABGR8888,
7684 	DRM_FORMAT_RGB565,
7685 };
7686 
7687 static const uint32_t overlay_formats[] = {
7688 	DRM_FORMAT_XRGB8888,
7689 	DRM_FORMAT_ARGB8888,
7690 	DRM_FORMAT_RGBA8888,
7691 	DRM_FORMAT_XBGR8888,
7692 	DRM_FORMAT_ABGR8888,
7693 	DRM_FORMAT_RGB565
7694 };
7695 
7696 static const u32 cursor_formats[] = {
7697 	DRM_FORMAT_ARGB8888
7698 };
7699 
7700 static int get_plane_formats(const struct drm_plane *plane,
7701 			     const struct dc_plane_cap *plane_cap,
7702 			     uint32_t *formats, int max_formats)
7703 {
7704 	int i, num_formats = 0;
7705 
7706 	/*
7707 	 * TODO: Query support for each group of formats directly from
7708 	 * DC plane caps. This will require adding more formats to the
7709 	 * caps list.
7710 	 */
7711 
7712 	switch (plane->type) {
7713 	case DRM_PLANE_TYPE_PRIMARY:
7714 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7715 			if (num_formats >= max_formats)
7716 				break;
7717 
7718 			formats[num_formats++] = rgb_formats[i];
7719 		}
7720 
7721 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7722 			formats[num_formats++] = DRM_FORMAT_NV12;
7723 		if (plane_cap && plane_cap->pixel_format_support.p010)
7724 			formats[num_formats++] = DRM_FORMAT_P010;
7725 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7726 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7727 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7728 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7729 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7730 		}
7731 		break;
7732 
7733 	case DRM_PLANE_TYPE_OVERLAY:
7734 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7735 			if (num_formats >= max_formats)
7736 				break;
7737 
7738 			formats[num_formats++] = overlay_formats[i];
7739 		}
7740 		break;
7741 
7742 	case DRM_PLANE_TYPE_CURSOR:
7743 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7744 			if (num_formats >= max_formats)
7745 				break;
7746 
7747 			formats[num_formats++] = cursor_formats[i];
7748 		}
7749 		break;
7750 	}
7751 
7752 	return num_formats;
7753 }
7754 
7755 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7756 				struct drm_plane *plane,
7757 				unsigned long possible_crtcs,
7758 				const struct dc_plane_cap *plane_cap)
7759 {
7760 	uint32_t formats[32];
7761 	int num_formats;
7762 	int res = -EPERM;
7763 	unsigned int supported_rotations;
7764 	uint64_t *modifiers = NULL;
7765 
7766 	num_formats = get_plane_formats(plane, plane_cap, formats,
7767 					ARRAY_SIZE(formats));
7768 
7769 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7770 	if (res)
7771 		return res;
7772 
7773 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7774 				       &dm_plane_funcs, formats, num_formats,
7775 				       modifiers, plane->type, NULL);
7776 	kfree(modifiers);
7777 	if (res)
7778 		return res;
7779 
7780 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7781 	    plane_cap && plane_cap->per_pixel_alpha) {
7782 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7783 					  BIT(DRM_MODE_BLEND_PREMULTI);
7784 
7785 		drm_plane_create_alpha_property(plane);
7786 		drm_plane_create_blend_mode_property(plane, blend_caps);
7787 	}
7788 
7789 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7790 	    plane_cap &&
7791 	    (plane_cap->pixel_format_support.nv12 ||
7792 	     plane_cap->pixel_format_support.p010)) {
7793 		/* This only affects YUV formats. */
7794 		drm_plane_create_color_properties(
7795 			plane,
7796 			BIT(DRM_COLOR_YCBCR_BT601) |
7797 			BIT(DRM_COLOR_YCBCR_BT709) |
7798 			BIT(DRM_COLOR_YCBCR_BT2020),
7799 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7800 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7801 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7802 	}
7803 
7804 	supported_rotations =
7805 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7806 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7807 
7808 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7809 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7810 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7811 						   supported_rotations);
7812 
7813 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7814 
7815 	/* Create (reset) the plane state */
7816 	if (plane->funcs->reset)
7817 		plane->funcs->reset(plane);
7818 
7819 	return 0;
7820 }
7821 
7822 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7823 			       struct drm_plane *plane,
7824 			       uint32_t crtc_index)
7825 {
7826 	struct amdgpu_crtc *acrtc = NULL;
7827 	struct drm_plane *cursor_plane;
7828 
7829 	int res = -ENOMEM;
7830 
7831 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7832 	if (!cursor_plane)
7833 		goto fail;
7834 
7835 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7836 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7837 
7838 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7839 	if (!acrtc)
7840 		goto fail;
7841 
7842 	res = drm_crtc_init_with_planes(
7843 			dm->ddev,
7844 			&acrtc->base,
7845 			plane,
7846 			cursor_plane,
7847 			&amdgpu_dm_crtc_funcs, NULL);
7848 
7849 	if (res)
7850 		goto fail;
7851 
7852 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7853 
7854 	/* Create (reset) the plane state */
7855 	if (acrtc->base.funcs->reset)
7856 		acrtc->base.funcs->reset(&acrtc->base);
7857 
7858 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7859 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7860 
7861 	acrtc->crtc_id = crtc_index;
7862 	acrtc->base.enabled = false;
7863 	acrtc->otg_inst = -1;
7864 
7865 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7866 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7867 				   true, MAX_COLOR_LUT_ENTRIES);
7868 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7869 
7870 	return 0;
7871 
7872 fail:
7873 	kfree(acrtc);
7874 	kfree(cursor_plane);
7875 	return res;
7876 }
7877 
7878 
7879 static int to_drm_connector_type(enum signal_type st)
7880 {
7881 	switch (st) {
7882 	case SIGNAL_TYPE_HDMI_TYPE_A:
7883 		return DRM_MODE_CONNECTOR_HDMIA;
7884 	case SIGNAL_TYPE_EDP:
7885 		return DRM_MODE_CONNECTOR_eDP;
7886 	case SIGNAL_TYPE_LVDS:
7887 		return DRM_MODE_CONNECTOR_LVDS;
7888 	case SIGNAL_TYPE_RGB:
7889 		return DRM_MODE_CONNECTOR_VGA;
7890 	case SIGNAL_TYPE_DISPLAY_PORT:
7891 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7892 		return DRM_MODE_CONNECTOR_DisplayPort;
7893 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7894 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7895 		return DRM_MODE_CONNECTOR_DVID;
7896 	case SIGNAL_TYPE_VIRTUAL:
7897 		return DRM_MODE_CONNECTOR_VIRTUAL;
7898 
7899 	default:
7900 		return DRM_MODE_CONNECTOR_Unknown;
7901 	}
7902 }
7903 
7904 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7905 {
7906 	struct drm_encoder *encoder;
7907 
7908 	/* There is only one encoder per connector */
7909 	drm_connector_for_each_possible_encoder(connector, encoder)
7910 		return encoder;
7911 
7912 	return NULL;
7913 }
7914 
7915 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7916 {
7917 	struct drm_encoder *encoder;
7918 	struct amdgpu_encoder *amdgpu_encoder;
7919 
7920 	encoder = amdgpu_dm_connector_to_encoder(connector);
7921 
7922 	if (encoder == NULL)
7923 		return;
7924 
7925 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7926 
7927 	amdgpu_encoder->native_mode.clock = 0;
7928 
7929 	if (!list_empty(&connector->probed_modes)) {
7930 		struct drm_display_mode *preferred_mode = NULL;
7931 
7932 		list_for_each_entry(preferred_mode,
7933 				    &connector->probed_modes,
7934 				    head) {
7935 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7936 				amdgpu_encoder->native_mode = *preferred_mode;
7937 
7938 			break;
7939 		}
7940 
7941 	}
7942 }
7943 
7944 static struct drm_display_mode *
7945 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7946 			     char *name,
7947 			     int hdisplay, int vdisplay)
7948 {
7949 	struct drm_device *dev = encoder->dev;
7950 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7951 	struct drm_display_mode *mode = NULL;
7952 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7953 
7954 	mode = drm_mode_duplicate(dev, native_mode);
7955 
7956 	if (mode == NULL)
7957 		return NULL;
7958 
7959 	mode->hdisplay = hdisplay;
7960 	mode->vdisplay = vdisplay;
7961 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7962 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7963 
7964 	return mode;
7965 
7966 }
7967 
7968 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7969 						 struct drm_connector *connector)
7970 {
7971 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7972 	struct drm_display_mode *mode = NULL;
7973 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7974 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7975 				to_amdgpu_dm_connector(connector);
7976 	int i;
7977 	int n;
7978 	struct mode_size {
7979 		char name[DRM_DISPLAY_MODE_LEN];
7980 		int w;
7981 		int h;
7982 	} common_modes[] = {
7983 		{  "640x480",  640,  480},
7984 		{  "800x600",  800,  600},
7985 		{ "1024x768", 1024,  768},
7986 		{ "1280x720", 1280,  720},
7987 		{ "1280x800", 1280,  800},
7988 		{"1280x1024", 1280, 1024},
7989 		{ "1440x900", 1440,  900},
7990 		{"1680x1050", 1680, 1050},
7991 		{"1600x1200", 1600, 1200},
7992 		{"1920x1080", 1920, 1080},
7993 		{"1920x1200", 1920, 1200}
7994 	};
7995 
7996 	n = ARRAY_SIZE(common_modes);
7997 
7998 	for (i = 0; i < n; i++) {
7999 		struct drm_display_mode *curmode = NULL;
8000 		bool mode_existed = false;
8001 
8002 		if (common_modes[i].w > native_mode->hdisplay ||
8003 		    common_modes[i].h > native_mode->vdisplay ||
8004 		   (common_modes[i].w == native_mode->hdisplay &&
8005 		    common_modes[i].h == native_mode->vdisplay))
8006 			continue;
8007 
8008 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8009 			if (common_modes[i].w == curmode->hdisplay &&
8010 			    common_modes[i].h == curmode->vdisplay) {
8011 				mode_existed = true;
8012 				break;
8013 			}
8014 		}
8015 
8016 		if (mode_existed)
8017 			continue;
8018 
8019 		mode = amdgpu_dm_create_common_mode(encoder,
8020 				common_modes[i].name, common_modes[i].w,
8021 				common_modes[i].h);
8022 		drm_mode_probed_add(connector, mode);
8023 		amdgpu_dm_connector->num_modes++;
8024 	}
8025 }
8026 
8027 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8028 {
8029 	struct drm_encoder *encoder;
8030 	struct amdgpu_encoder *amdgpu_encoder;
8031 	const struct drm_display_mode *native_mode;
8032 
8033 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8034 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8035 		return;
8036 
8037 	encoder = amdgpu_dm_connector_to_encoder(connector);
8038 	if (!encoder)
8039 		return;
8040 
8041 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8042 
8043 	native_mode = &amdgpu_encoder->native_mode;
8044 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8045 		return;
8046 
8047 	drm_connector_set_panel_orientation_with_quirk(connector,
8048 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8049 						       native_mode->hdisplay,
8050 						       native_mode->vdisplay);
8051 }
8052 
8053 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8054 					      struct edid *edid)
8055 {
8056 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8057 			to_amdgpu_dm_connector(connector);
8058 
8059 	if (edid) {
8060 		/* empty probed_modes */
8061 		INIT_LIST_HEAD(&connector->probed_modes);
8062 		amdgpu_dm_connector->num_modes =
8063 				drm_add_edid_modes(connector, edid);
8064 
8065 		/* sorting the probed modes before calling function
8066 		 * amdgpu_dm_get_native_mode() since EDID can have
8067 		 * more than one preferred mode. The modes that are
8068 		 * later in the probed mode list could be of higher
8069 		 * and preferred resolution. For example, 3840x2160
8070 		 * resolution in base EDID preferred timing and 4096x2160
8071 		 * preferred resolution in DID extension block later.
8072 		 */
8073 		drm_mode_sort(&connector->probed_modes);
8074 		amdgpu_dm_get_native_mode(connector);
8075 
8076 		/* Freesync capabilities are reset by calling
8077 		 * drm_add_edid_modes() and need to be
8078 		 * restored here.
8079 		 */
8080 		amdgpu_dm_update_freesync_caps(connector, edid);
8081 
8082 		amdgpu_set_panel_orientation(connector);
8083 	} else {
8084 		amdgpu_dm_connector->num_modes = 0;
8085 	}
8086 }
8087 
8088 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8089 			      struct drm_display_mode *mode)
8090 {
8091 	struct drm_display_mode *m;
8092 
8093 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8094 		if (drm_mode_equal(m, mode))
8095 			return true;
8096 	}
8097 
8098 	return false;
8099 }
8100 
8101 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8102 {
8103 	const struct drm_display_mode *m;
8104 	struct drm_display_mode *new_mode;
8105 	uint i;
8106 	uint32_t new_modes_count = 0;
8107 
8108 	/* Standard FPS values
8109 	 *
8110 	 * 23.976       - TV/NTSC
8111 	 * 24 	        - Cinema
8112 	 * 25 	        - TV/PAL
8113 	 * 29.97        - TV/NTSC
8114 	 * 30 	        - TV/NTSC
8115 	 * 48 	        - Cinema HFR
8116 	 * 50 	        - TV/PAL
8117 	 * 60 	        - Commonly used
8118 	 * 48,72,96,120 - Multiples of 24
8119 	 */
8120 	static const uint32_t common_rates[] = {
8121 		23976, 24000, 25000, 29970, 30000,
8122 		48000, 50000, 60000, 72000, 96000, 120000
8123 	};
8124 
8125 	/*
8126 	 * Find mode with highest refresh rate with the same resolution
8127 	 * as the preferred mode. Some monitors report a preferred mode
8128 	 * with lower resolution than the highest refresh rate supported.
8129 	 */
8130 
8131 	m = get_highest_refresh_rate_mode(aconnector, true);
8132 	if (!m)
8133 		return 0;
8134 
8135 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8136 		uint64_t target_vtotal, target_vtotal_diff;
8137 		uint64_t num, den;
8138 
8139 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8140 			continue;
8141 
8142 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8143 		    common_rates[i] > aconnector->max_vfreq * 1000)
8144 			continue;
8145 
8146 		num = (unsigned long long)m->clock * 1000 * 1000;
8147 		den = common_rates[i] * (unsigned long long)m->htotal;
8148 		target_vtotal = div_u64(num, den);
8149 		target_vtotal_diff = target_vtotal - m->vtotal;
8150 
8151 		/* Check for illegal modes */
8152 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8153 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8154 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8155 			continue;
8156 
8157 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8158 		if (!new_mode)
8159 			goto out;
8160 
8161 		new_mode->vtotal += (u16)target_vtotal_diff;
8162 		new_mode->vsync_start += (u16)target_vtotal_diff;
8163 		new_mode->vsync_end += (u16)target_vtotal_diff;
8164 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8165 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8166 
8167 		if (!is_duplicate_mode(aconnector, new_mode)) {
8168 			drm_mode_probed_add(&aconnector->base, new_mode);
8169 			new_modes_count += 1;
8170 		} else
8171 			drm_mode_destroy(aconnector->base.dev, new_mode);
8172 	}
8173  out:
8174 	return new_modes_count;
8175 }
8176 
8177 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8178 						   struct edid *edid)
8179 {
8180 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8181 		to_amdgpu_dm_connector(connector);
8182 
8183 	if (!(amdgpu_freesync_vid_mode && edid))
8184 		return;
8185 
8186 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8187 		amdgpu_dm_connector->num_modes +=
8188 			add_fs_modes(amdgpu_dm_connector);
8189 }
8190 
8191 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8192 {
8193 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8194 			to_amdgpu_dm_connector(connector);
8195 	struct drm_encoder *encoder;
8196 	struct edid *edid = amdgpu_dm_connector->edid;
8197 
8198 	encoder = amdgpu_dm_connector_to_encoder(connector);
8199 
8200 	if (!drm_edid_is_valid(edid)) {
8201 		amdgpu_dm_connector->num_modes =
8202 				drm_add_modes_noedid(connector, 640, 480);
8203 	} else {
8204 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8205 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8206 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8207 	}
8208 	amdgpu_dm_fbc_init(connector);
8209 
8210 	return amdgpu_dm_connector->num_modes;
8211 }
8212 
8213 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8214 				     struct amdgpu_dm_connector *aconnector,
8215 				     int connector_type,
8216 				     struct dc_link *link,
8217 				     int link_index)
8218 {
8219 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8220 
8221 	/*
8222 	 * Some of the properties below require access to state, like bpc.
8223 	 * Allocate some default initial connector state with our reset helper.
8224 	 */
8225 	if (aconnector->base.funcs->reset)
8226 		aconnector->base.funcs->reset(&aconnector->base);
8227 
8228 	aconnector->connector_id = link_index;
8229 	aconnector->dc_link = link;
8230 	aconnector->base.interlace_allowed = false;
8231 	aconnector->base.doublescan_allowed = false;
8232 	aconnector->base.stereo_allowed = false;
8233 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8234 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8235 	aconnector->audio_inst = -1;
8236 	mutex_init(&aconnector->hpd_lock);
8237 
8238 	/*
8239 	 * configure support HPD hot plug connector_>polled default value is 0
8240 	 * which means HPD hot plug not supported
8241 	 */
8242 	switch (connector_type) {
8243 	case DRM_MODE_CONNECTOR_HDMIA:
8244 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8245 		aconnector->base.ycbcr_420_allowed =
8246 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8247 		break;
8248 	case DRM_MODE_CONNECTOR_DisplayPort:
8249 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8250 		if (link->is_dig_mapping_flexible &&
8251 		    link->dc->res_pool->funcs->link_encs_assign) {
8252 			link->link_enc =
8253 				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8254 			if (!link->link_enc)
8255 				link->link_enc =
8256 					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8257 		}
8258 
8259 		if (link->link_enc)
8260 			aconnector->base.ycbcr_420_allowed =
8261 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8262 		break;
8263 	case DRM_MODE_CONNECTOR_DVID:
8264 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8265 		break;
8266 	default:
8267 		break;
8268 	}
8269 
8270 	drm_object_attach_property(&aconnector->base.base,
8271 				dm->ddev->mode_config.scaling_mode_property,
8272 				DRM_MODE_SCALE_NONE);
8273 
8274 	drm_object_attach_property(&aconnector->base.base,
8275 				adev->mode_info.underscan_property,
8276 				UNDERSCAN_OFF);
8277 	drm_object_attach_property(&aconnector->base.base,
8278 				adev->mode_info.underscan_hborder_property,
8279 				0);
8280 	drm_object_attach_property(&aconnector->base.base,
8281 				adev->mode_info.underscan_vborder_property,
8282 				0);
8283 
8284 	if (!aconnector->mst_port)
8285 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8286 
8287 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8288 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8289 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8290 
8291 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8292 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8293 		drm_object_attach_property(&aconnector->base.base,
8294 				adev->mode_info.abm_level_property, 0);
8295 	}
8296 
8297 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8298 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8299 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8300 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8301 
8302 		if (!aconnector->mst_port)
8303 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8304 
8305 #ifdef CONFIG_DRM_AMD_DC_HDCP
8306 		if (adev->dm.hdcp_workqueue)
8307 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8308 #endif
8309 	}
8310 }
8311 
8312 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8313 			      struct i2c_msg *msgs, int num)
8314 {
8315 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8316 	struct ddc_service *ddc_service = i2c->ddc_service;
8317 	struct i2c_command cmd;
8318 	int i;
8319 	int result = -EIO;
8320 
8321 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8322 
8323 	if (!cmd.payloads)
8324 		return result;
8325 
8326 	cmd.number_of_payloads = num;
8327 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8328 	cmd.speed = 100;
8329 
8330 	for (i = 0; i < num; i++) {
8331 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8332 		cmd.payloads[i].address = msgs[i].addr;
8333 		cmd.payloads[i].length = msgs[i].len;
8334 		cmd.payloads[i].data = msgs[i].buf;
8335 	}
8336 
8337 	if (dc_submit_i2c(
8338 			ddc_service->ctx->dc,
8339 			ddc_service->ddc_pin->hw_info.ddc_channel,
8340 			&cmd))
8341 		result = num;
8342 
8343 	kfree(cmd.payloads);
8344 	return result;
8345 }
8346 
8347 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8348 {
8349 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8350 }
8351 
8352 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8353 	.master_xfer = amdgpu_dm_i2c_xfer,
8354 	.functionality = amdgpu_dm_i2c_func,
8355 };
8356 
8357 static struct amdgpu_i2c_adapter *
8358 create_i2c(struct ddc_service *ddc_service,
8359 	   int link_index,
8360 	   int *res)
8361 {
8362 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8363 	struct amdgpu_i2c_adapter *i2c;
8364 
8365 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8366 	if (!i2c)
8367 		return NULL;
8368 	i2c->base.owner = THIS_MODULE;
8369 	i2c->base.class = I2C_CLASS_DDC;
8370 	i2c->base.dev.parent = &adev->pdev->dev;
8371 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8372 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8373 	i2c_set_adapdata(&i2c->base, i2c);
8374 	i2c->ddc_service = ddc_service;
8375 	if (i2c->ddc_service->ddc_pin)
8376 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8377 
8378 	return i2c;
8379 }
8380 
8381 
8382 /*
8383  * Note: this function assumes that dc_link_detect() was called for the
8384  * dc_link which will be represented by this aconnector.
8385  */
8386 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8387 				    struct amdgpu_dm_connector *aconnector,
8388 				    uint32_t link_index,
8389 				    struct amdgpu_encoder *aencoder)
8390 {
8391 	int res = 0;
8392 	int connector_type;
8393 	struct dc *dc = dm->dc;
8394 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8395 	struct amdgpu_i2c_adapter *i2c;
8396 
8397 	link->priv = aconnector;
8398 
8399 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8400 
8401 	i2c = create_i2c(link->ddc, link->link_index, &res);
8402 	if (!i2c) {
8403 		DRM_ERROR("Failed to create i2c adapter data\n");
8404 		return -ENOMEM;
8405 	}
8406 
8407 	aconnector->i2c = i2c;
8408 	res = i2c_add_adapter(&i2c->base);
8409 
8410 	if (res) {
8411 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8412 		goto out_free;
8413 	}
8414 
8415 	connector_type = to_drm_connector_type(link->connector_signal);
8416 
8417 	res = drm_connector_init_with_ddc(
8418 			dm->ddev,
8419 			&aconnector->base,
8420 			&amdgpu_dm_connector_funcs,
8421 			connector_type,
8422 			&i2c->base);
8423 
8424 	if (res) {
8425 		DRM_ERROR("connector_init failed\n");
8426 		aconnector->connector_id = -1;
8427 		goto out_free;
8428 	}
8429 
8430 	drm_connector_helper_add(
8431 			&aconnector->base,
8432 			&amdgpu_dm_connector_helper_funcs);
8433 
8434 	amdgpu_dm_connector_init_helper(
8435 		dm,
8436 		aconnector,
8437 		connector_type,
8438 		link,
8439 		link_index);
8440 
8441 	drm_connector_attach_encoder(
8442 		&aconnector->base, &aencoder->base);
8443 
8444 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8445 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8446 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8447 
8448 out_free:
8449 	if (res) {
8450 		kfree(i2c);
8451 		aconnector->i2c = NULL;
8452 	}
8453 	return res;
8454 }
8455 
8456 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8457 {
8458 	switch (adev->mode_info.num_crtc) {
8459 	case 1:
8460 		return 0x1;
8461 	case 2:
8462 		return 0x3;
8463 	case 3:
8464 		return 0x7;
8465 	case 4:
8466 		return 0xf;
8467 	case 5:
8468 		return 0x1f;
8469 	case 6:
8470 	default:
8471 		return 0x3f;
8472 	}
8473 }
8474 
8475 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8476 				  struct amdgpu_encoder *aencoder,
8477 				  uint32_t link_index)
8478 {
8479 	struct amdgpu_device *adev = drm_to_adev(dev);
8480 
8481 	int res = drm_encoder_init(dev,
8482 				   &aencoder->base,
8483 				   &amdgpu_dm_encoder_funcs,
8484 				   DRM_MODE_ENCODER_TMDS,
8485 				   NULL);
8486 
8487 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8488 
8489 	if (!res)
8490 		aencoder->encoder_id = link_index;
8491 	else
8492 		aencoder->encoder_id = -1;
8493 
8494 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8495 
8496 	return res;
8497 }
8498 
8499 static void manage_dm_interrupts(struct amdgpu_device *adev,
8500 				 struct amdgpu_crtc *acrtc,
8501 				 bool enable)
8502 {
8503 	/*
8504 	 * We have no guarantee that the frontend index maps to the same
8505 	 * backend index - some even map to more than one.
8506 	 *
8507 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8508 	 */
8509 	int irq_type =
8510 		amdgpu_display_crtc_idx_to_irq_type(
8511 			adev,
8512 			acrtc->crtc_id);
8513 
8514 	if (enable) {
8515 		drm_crtc_vblank_on(&acrtc->base);
8516 		amdgpu_irq_get(
8517 			adev,
8518 			&adev->pageflip_irq,
8519 			irq_type);
8520 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8521 		amdgpu_irq_get(
8522 			adev,
8523 			&adev->vline0_irq,
8524 			irq_type);
8525 #endif
8526 	} else {
8527 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8528 		amdgpu_irq_put(
8529 			adev,
8530 			&adev->vline0_irq,
8531 			irq_type);
8532 #endif
8533 		amdgpu_irq_put(
8534 			adev,
8535 			&adev->pageflip_irq,
8536 			irq_type);
8537 		drm_crtc_vblank_off(&acrtc->base);
8538 	}
8539 }
8540 
8541 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8542 				      struct amdgpu_crtc *acrtc)
8543 {
8544 	int irq_type =
8545 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8546 
8547 	/**
8548 	 * This reads the current state for the IRQ and force reapplies
8549 	 * the setting to hardware.
8550 	 */
8551 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8552 }
8553 
8554 static bool
8555 is_scaling_state_different(const struct dm_connector_state *dm_state,
8556 			   const struct dm_connector_state *old_dm_state)
8557 {
8558 	if (dm_state->scaling != old_dm_state->scaling)
8559 		return true;
8560 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8561 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8562 			return true;
8563 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8564 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8565 			return true;
8566 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8567 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8568 		return true;
8569 	return false;
8570 }
8571 
8572 #ifdef CONFIG_DRM_AMD_DC_HDCP
8573 static bool is_content_protection_different(struct drm_connector_state *state,
8574 					    const struct drm_connector_state *old_state,
8575 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8576 {
8577 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8578 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8579 
8580 	/* Handle: Type0/1 change */
8581 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8582 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8583 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8584 		return true;
8585 	}
8586 
8587 	/* CP is being re enabled, ignore this
8588 	 *
8589 	 * Handles:	ENABLED -> DESIRED
8590 	 */
8591 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8592 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8593 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8594 		return false;
8595 	}
8596 
8597 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8598 	 *
8599 	 * Handles:	UNDESIRED -> ENABLED
8600 	 */
8601 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8602 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8603 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8604 
8605 	/* Stream removed and re-enabled
8606 	 *
8607 	 * Can sometimes overlap with the HPD case,
8608 	 * thus set update_hdcp to false to avoid
8609 	 * setting HDCP multiple times.
8610 	 *
8611 	 * Handles:	DESIRED -> DESIRED (Special case)
8612 	 */
8613 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8614 		state->crtc && state->crtc->enabled &&
8615 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8616 		dm_con_state->update_hdcp = false;
8617 		return true;
8618 	}
8619 
8620 	/* Hot-plug, headless s3, dpms
8621 	 *
8622 	 * Only start HDCP if the display is connected/enabled.
8623 	 * update_hdcp flag will be set to false until the next
8624 	 * HPD comes in.
8625 	 *
8626 	 * Handles:	DESIRED -> DESIRED (Special case)
8627 	 */
8628 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8629 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8630 		dm_con_state->update_hdcp = false;
8631 		return true;
8632 	}
8633 
8634 	/*
8635 	 * Handles:	UNDESIRED -> UNDESIRED
8636 	 *		DESIRED -> DESIRED
8637 	 *		ENABLED -> ENABLED
8638 	 */
8639 	if (old_state->content_protection == state->content_protection)
8640 		return false;
8641 
8642 	/*
8643 	 * Handles:	UNDESIRED -> DESIRED
8644 	 *		DESIRED -> UNDESIRED
8645 	 *		ENABLED -> UNDESIRED
8646 	 */
8647 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8648 		return true;
8649 
8650 	/*
8651 	 * Handles:	DESIRED -> ENABLED
8652 	 */
8653 	return false;
8654 }
8655 
8656 #endif
8657 static void remove_stream(struct amdgpu_device *adev,
8658 			  struct amdgpu_crtc *acrtc,
8659 			  struct dc_stream_state *stream)
8660 {
8661 	/* this is the update mode case */
8662 
8663 	acrtc->otg_inst = -1;
8664 	acrtc->enabled = false;
8665 }
8666 
8667 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8668 			       struct dc_cursor_position *position)
8669 {
8670 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8671 	int x, y;
8672 	int xorigin = 0, yorigin = 0;
8673 
8674 	if (!crtc || !plane->state->fb)
8675 		return 0;
8676 
8677 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8678 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8679 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8680 			  __func__,
8681 			  plane->state->crtc_w,
8682 			  plane->state->crtc_h);
8683 		return -EINVAL;
8684 	}
8685 
8686 	x = plane->state->crtc_x;
8687 	y = plane->state->crtc_y;
8688 
8689 	if (x <= -amdgpu_crtc->max_cursor_width ||
8690 	    y <= -amdgpu_crtc->max_cursor_height)
8691 		return 0;
8692 
8693 	if (x < 0) {
8694 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8695 		x = 0;
8696 	}
8697 	if (y < 0) {
8698 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8699 		y = 0;
8700 	}
8701 	position->enable = true;
8702 	position->translate_by_source = true;
8703 	position->x = x;
8704 	position->y = y;
8705 	position->x_hotspot = xorigin;
8706 	position->y_hotspot = yorigin;
8707 
8708 	return 0;
8709 }
8710 
8711 static void handle_cursor_update(struct drm_plane *plane,
8712 				 struct drm_plane_state *old_plane_state)
8713 {
8714 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8715 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8716 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8717 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8718 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8719 	uint64_t address = afb ? afb->address : 0;
8720 	struct dc_cursor_position position = {0};
8721 	struct dc_cursor_attributes attributes;
8722 	int ret;
8723 
8724 	if (!plane->state->fb && !old_plane_state->fb)
8725 		return;
8726 
8727 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8728 		      __func__,
8729 		      amdgpu_crtc->crtc_id,
8730 		      plane->state->crtc_w,
8731 		      plane->state->crtc_h);
8732 
8733 	ret = get_cursor_position(plane, crtc, &position);
8734 	if (ret)
8735 		return;
8736 
8737 	if (!position.enable) {
8738 		/* turn off cursor */
8739 		if (crtc_state && crtc_state->stream) {
8740 			mutex_lock(&adev->dm.dc_lock);
8741 			dc_stream_set_cursor_position(crtc_state->stream,
8742 						      &position);
8743 			mutex_unlock(&adev->dm.dc_lock);
8744 		}
8745 		return;
8746 	}
8747 
8748 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8749 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8750 
8751 	memset(&attributes, 0, sizeof(attributes));
8752 	attributes.address.high_part = upper_32_bits(address);
8753 	attributes.address.low_part  = lower_32_bits(address);
8754 	attributes.width             = plane->state->crtc_w;
8755 	attributes.height            = plane->state->crtc_h;
8756 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8757 	attributes.rotation_angle    = 0;
8758 	attributes.attribute_flags.value = 0;
8759 
8760 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8761 
8762 	if (crtc_state->stream) {
8763 		mutex_lock(&adev->dm.dc_lock);
8764 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8765 							 &attributes))
8766 			DRM_ERROR("DC failed to set cursor attributes\n");
8767 
8768 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8769 						   &position))
8770 			DRM_ERROR("DC failed to set cursor position\n");
8771 		mutex_unlock(&adev->dm.dc_lock);
8772 	}
8773 }
8774 
8775 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8776 {
8777 
8778 	assert_spin_locked(&acrtc->base.dev->event_lock);
8779 	WARN_ON(acrtc->event);
8780 
8781 	acrtc->event = acrtc->base.state->event;
8782 
8783 	/* Set the flip status */
8784 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8785 
8786 	/* Mark this event as consumed */
8787 	acrtc->base.state->event = NULL;
8788 
8789 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8790 		     acrtc->crtc_id);
8791 }
8792 
8793 static void update_freesync_state_on_stream(
8794 	struct amdgpu_display_manager *dm,
8795 	struct dm_crtc_state *new_crtc_state,
8796 	struct dc_stream_state *new_stream,
8797 	struct dc_plane_state *surface,
8798 	u32 flip_timestamp_in_us)
8799 {
8800 	struct mod_vrr_params vrr_params;
8801 	struct dc_info_packet vrr_infopacket = {0};
8802 	struct amdgpu_device *adev = dm->adev;
8803 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8804 	unsigned long flags;
8805 	bool pack_sdp_v1_3 = false;
8806 
8807 	if (!new_stream)
8808 		return;
8809 
8810 	/*
8811 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8812 	 * For now it's sufficient to just guard against these conditions.
8813 	 */
8814 
8815 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8816 		return;
8817 
8818 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8819         vrr_params = acrtc->dm_irq_params.vrr_params;
8820 
8821 	if (surface) {
8822 		mod_freesync_handle_preflip(
8823 			dm->freesync_module,
8824 			surface,
8825 			new_stream,
8826 			flip_timestamp_in_us,
8827 			&vrr_params);
8828 
8829 		if (adev->family < AMDGPU_FAMILY_AI &&
8830 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8831 			mod_freesync_handle_v_update(dm->freesync_module,
8832 						     new_stream, &vrr_params);
8833 
8834 			/* Need to call this before the frame ends. */
8835 			dc_stream_adjust_vmin_vmax(dm->dc,
8836 						   new_crtc_state->stream,
8837 						   &vrr_params.adjust);
8838 		}
8839 	}
8840 
8841 	mod_freesync_build_vrr_infopacket(
8842 		dm->freesync_module,
8843 		new_stream,
8844 		&vrr_params,
8845 		PACKET_TYPE_VRR,
8846 		TRANSFER_FUNC_UNKNOWN,
8847 		&vrr_infopacket,
8848 		pack_sdp_v1_3);
8849 
8850 	new_crtc_state->freesync_timing_changed |=
8851 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8852 			&vrr_params.adjust,
8853 			sizeof(vrr_params.adjust)) != 0);
8854 
8855 	new_crtc_state->freesync_vrr_info_changed |=
8856 		(memcmp(&new_crtc_state->vrr_infopacket,
8857 			&vrr_infopacket,
8858 			sizeof(vrr_infopacket)) != 0);
8859 
8860 	acrtc->dm_irq_params.vrr_params = vrr_params;
8861 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8862 
8863 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8864 	new_stream->vrr_infopacket = vrr_infopacket;
8865 
8866 	if (new_crtc_state->freesync_vrr_info_changed)
8867 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8868 			      new_crtc_state->base.crtc->base.id,
8869 			      (int)new_crtc_state->base.vrr_enabled,
8870 			      (int)vrr_params.state);
8871 
8872 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8873 }
8874 
8875 static void update_stream_irq_parameters(
8876 	struct amdgpu_display_manager *dm,
8877 	struct dm_crtc_state *new_crtc_state)
8878 {
8879 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8880 	struct mod_vrr_params vrr_params;
8881 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8882 	struct amdgpu_device *adev = dm->adev;
8883 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8884 	unsigned long flags;
8885 
8886 	if (!new_stream)
8887 		return;
8888 
8889 	/*
8890 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8891 	 * For now it's sufficient to just guard against these conditions.
8892 	 */
8893 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8894 		return;
8895 
8896 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8897 	vrr_params = acrtc->dm_irq_params.vrr_params;
8898 
8899 	if (new_crtc_state->vrr_supported &&
8900 	    config.min_refresh_in_uhz &&
8901 	    config.max_refresh_in_uhz) {
8902 		/*
8903 		 * if freesync compatible mode was set, config.state will be set
8904 		 * in atomic check
8905 		 */
8906 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8907 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8908 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8909 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8910 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8911 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8912 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8913 		} else {
8914 			config.state = new_crtc_state->base.vrr_enabled ?
8915 						     VRR_STATE_ACTIVE_VARIABLE :
8916 						     VRR_STATE_INACTIVE;
8917 		}
8918 	} else {
8919 		config.state = VRR_STATE_UNSUPPORTED;
8920 	}
8921 
8922 	mod_freesync_build_vrr_params(dm->freesync_module,
8923 				      new_stream,
8924 				      &config, &vrr_params);
8925 
8926 	new_crtc_state->freesync_timing_changed |=
8927 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8928 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8929 
8930 	new_crtc_state->freesync_config = config;
8931 	/* Copy state for access from DM IRQ handler */
8932 	acrtc->dm_irq_params.freesync_config = config;
8933 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8934 	acrtc->dm_irq_params.vrr_params = vrr_params;
8935 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8936 }
8937 
8938 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8939 					    struct dm_crtc_state *new_state)
8940 {
8941 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8942 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8943 
8944 	if (!old_vrr_active && new_vrr_active) {
8945 		/* Transition VRR inactive -> active:
8946 		 * While VRR is active, we must not disable vblank irq, as a
8947 		 * reenable after disable would compute bogus vblank/pflip
8948 		 * timestamps if it likely happened inside display front-porch.
8949 		 *
8950 		 * We also need vupdate irq for the actual core vblank handling
8951 		 * at end of vblank.
8952 		 */
8953 		dm_set_vupdate_irq(new_state->base.crtc, true);
8954 		drm_crtc_vblank_get(new_state->base.crtc);
8955 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8956 				 __func__, new_state->base.crtc->base.id);
8957 	} else if (old_vrr_active && !new_vrr_active) {
8958 		/* Transition VRR active -> inactive:
8959 		 * Allow vblank irq disable again for fixed refresh rate.
8960 		 */
8961 		dm_set_vupdate_irq(new_state->base.crtc, false);
8962 		drm_crtc_vblank_put(new_state->base.crtc);
8963 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8964 				 __func__, new_state->base.crtc->base.id);
8965 	}
8966 }
8967 
8968 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8969 {
8970 	struct drm_plane *plane;
8971 	struct drm_plane_state *old_plane_state;
8972 	int i;
8973 
8974 	/*
8975 	 * TODO: Make this per-stream so we don't issue redundant updates for
8976 	 * commits with multiple streams.
8977 	 */
8978 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8979 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8980 			handle_cursor_update(plane, old_plane_state);
8981 }
8982 
8983 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8984 				    struct dc_state *dc_state,
8985 				    struct drm_device *dev,
8986 				    struct amdgpu_display_manager *dm,
8987 				    struct drm_crtc *pcrtc,
8988 				    bool wait_for_vblank)
8989 {
8990 	uint32_t i;
8991 	uint64_t timestamp_ns;
8992 	struct drm_plane *plane;
8993 	struct drm_plane_state *old_plane_state, *new_plane_state;
8994 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8995 	struct drm_crtc_state *new_pcrtc_state =
8996 			drm_atomic_get_new_crtc_state(state, pcrtc);
8997 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8998 	struct dm_crtc_state *dm_old_crtc_state =
8999 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9000 	int planes_count = 0, vpos, hpos;
9001 	long r;
9002 	unsigned long flags;
9003 	struct amdgpu_bo *abo;
9004 	uint32_t target_vblank, last_flip_vblank;
9005 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9006 	bool pflip_present = false;
9007 	struct {
9008 		struct dc_surface_update surface_updates[MAX_SURFACES];
9009 		struct dc_plane_info plane_infos[MAX_SURFACES];
9010 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9011 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9012 		struct dc_stream_update stream_update;
9013 	} *bundle;
9014 
9015 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9016 
9017 	if (!bundle) {
9018 		dm_error("Failed to allocate update bundle\n");
9019 		goto cleanup;
9020 	}
9021 
9022 	/*
9023 	 * Disable the cursor first if we're disabling all the planes.
9024 	 * It'll remain on the screen after the planes are re-enabled
9025 	 * if we don't.
9026 	 */
9027 	if (acrtc_state->active_planes == 0)
9028 		amdgpu_dm_commit_cursors(state);
9029 
9030 	/* update planes when needed */
9031 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9032 		struct drm_crtc *crtc = new_plane_state->crtc;
9033 		struct drm_crtc_state *new_crtc_state;
9034 		struct drm_framebuffer *fb = new_plane_state->fb;
9035 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9036 		bool plane_needs_flip;
9037 		struct dc_plane_state *dc_plane;
9038 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9039 
9040 		/* Cursor plane is handled after stream updates */
9041 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9042 			continue;
9043 
9044 		if (!fb || !crtc || pcrtc != crtc)
9045 			continue;
9046 
9047 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9048 		if (!new_crtc_state->active)
9049 			continue;
9050 
9051 		dc_plane = dm_new_plane_state->dc_state;
9052 
9053 		bundle->surface_updates[planes_count].surface = dc_plane;
9054 		if (new_pcrtc_state->color_mgmt_changed) {
9055 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9056 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9057 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9058 		}
9059 
9060 		fill_dc_scaling_info(dm->adev, new_plane_state,
9061 				     &bundle->scaling_infos[planes_count]);
9062 
9063 		bundle->surface_updates[planes_count].scaling_info =
9064 			&bundle->scaling_infos[planes_count];
9065 
9066 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9067 
9068 		pflip_present = pflip_present || plane_needs_flip;
9069 
9070 		if (!plane_needs_flip) {
9071 			planes_count += 1;
9072 			continue;
9073 		}
9074 
9075 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9076 
9077 		/*
9078 		 * Wait for all fences on this FB. Do limited wait to avoid
9079 		 * deadlock during GPU reset when this fence will not signal
9080 		 * but we hold reservation lock for the BO.
9081 		 */
9082 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9083 					  msecs_to_jiffies(5000));
9084 		if (unlikely(r <= 0))
9085 			DRM_ERROR("Waiting for fences timed out!");
9086 
9087 		fill_dc_plane_info_and_addr(
9088 			dm->adev, new_plane_state,
9089 			afb->tiling_flags,
9090 			&bundle->plane_infos[planes_count],
9091 			&bundle->flip_addrs[planes_count].address,
9092 			afb->tmz_surface, false);
9093 
9094 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9095 				 new_plane_state->plane->index,
9096 				 bundle->plane_infos[planes_count].dcc.enable);
9097 
9098 		bundle->surface_updates[planes_count].plane_info =
9099 			&bundle->plane_infos[planes_count];
9100 
9101 		/*
9102 		 * Only allow immediate flips for fast updates that don't
9103 		 * change FB pitch, DCC state, rotation or mirroing.
9104 		 */
9105 		bundle->flip_addrs[planes_count].flip_immediate =
9106 			crtc->state->async_flip &&
9107 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9108 
9109 		timestamp_ns = ktime_get_ns();
9110 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9111 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9112 		bundle->surface_updates[planes_count].surface = dc_plane;
9113 
9114 		if (!bundle->surface_updates[planes_count].surface) {
9115 			DRM_ERROR("No surface for CRTC: id=%d\n",
9116 					acrtc_attach->crtc_id);
9117 			continue;
9118 		}
9119 
9120 		if (plane == pcrtc->primary)
9121 			update_freesync_state_on_stream(
9122 				dm,
9123 				acrtc_state,
9124 				acrtc_state->stream,
9125 				dc_plane,
9126 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9127 
9128 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9129 				 __func__,
9130 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9131 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9132 
9133 		planes_count += 1;
9134 
9135 	}
9136 
9137 	if (pflip_present) {
9138 		if (!vrr_active) {
9139 			/* Use old throttling in non-vrr fixed refresh rate mode
9140 			 * to keep flip scheduling based on target vblank counts
9141 			 * working in a backwards compatible way, e.g., for
9142 			 * clients using the GLX_OML_sync_control extension or
9143 			 * DRI3/Present extension with defined target_msc.
9144 			 */
9145 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9146 		}
9147 		else {
9148 			/* For variable refresh rate mode only:
9149 			 * Get vblank of last completed flip to avoid > 1 vrr
9150 			 * flips per video frame by use of throttling, but allow
9151 			 * flip programming anywhere in the possibly large
9152 			 * variable vrr vblank interval for fine-grained flip
9153 			 * timing control and more opportunity to avoid stutter
9154 			 * on late submission of flips.
9155 			 */
9156 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9157 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9158 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9159 		}
9160 
9161 		target_vblank = last_flip_vblank + wait_for_vblank;
9162 
9163 		/*
9164 		 * Wait until we're out of the vertical blank period before the one
9165 		 * targeted by the flip
9166 		 */
9167 		while ((acrtc_attach->enabled &&
9168 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9169 							    0, &vpos, &hpos, NULL,
9170 							    NULL, &pcrtc->hwmode)
9171 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9172 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9173 			(int)(target_vblank -
9174 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9175 			usleep_range(1000, 1100);
9176 		}
9177 
9178 		/**
9179 		 * Prepare the flip event for the pageflip interrupt to handle.
9180 		 *
9181 		 * This only works in the case where we've already turned on the
9182 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9183 		 * from 0 -> n planes we have to skip a hardware generated event
9184 		 * and rely on sending it from software.
9185 		 */
9186 		if (acrtc_attach->base.state->event &&
9187 		    acrtc_state->active_planes > 0 &&
9188 		    !acrtc_state->force_dpms_off) {
9189 			drm_crtc_vblank_get(pcrtc);
9190 
9191 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9192 
9193 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9194 			prepare_flip_isr(acrtc_attach);
9195 
9196 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9197 		}
9198 
9199 		if (acrtc_state->stream) {
9200 			if (acrtc_state->freesync_vrr_info_changed)
9201 				bundle->stream_update.vrr_infopacket =
9202 					&acrtc_state->stream->vrr_infopacket;
9203 		}
9204 	}
9205 
9206 	/* Update the planes if changed or disable if we don't have any. */
9207 	if ((planes_count || acrtc_state->active_planes == 0) &&
9208 		acrtc_state->stream) {
9209 #if defined(CONFIG_DRM_AMD_DC_DCN)
9210 		/*
9211 		 * If PSR or idle optimizations are enabled then flush out
9212 		 * any pending work before hardware programming.
9213 		 */
9214 		if (dm->vblank_control_workqueue)
9215 			flush_workqueue(dm->vblank_control_workqueue);
9216 #endif
9217 
9218 		bundle->stream_update.stream = acrtc_state->stream;
9219 		if (new_pcrtc_state->mode_changed) {
9220 			bundle->stream_update.src = acrtc_state->stream->src;
9221 			bundle->stream_update.dst = acrtc_state->stream->dst;
9222 		}
9223 
9224 		if (new_pcrtc_state->color_mgmt_changed) {
9225 			/*
9226 			 * TODO: This isn't fully correct since we've actually
9227 			 * already modified the stream in place.
9228 			 */
9229 			bundle->stream_update.gamut_remap =
9230 				&acrtc_state->stream->gamut_remap_matrix;
9231 			bundle->stream_update.output_csc_transform =
9232 				&acrtc_state->stream->csc_color_matrix;
9233 			bundle->stream_update.out_transfer_func =
9234 				acrtc_state->stream->out_transfer_func;
9235 		}
9236 
9237 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9238 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9239 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9240 
9241 		/*
9242 		 * If FreeSync state on the stream has changed then we need to
9243 		 * re-adjust the min/max bounds now that DC doesn't handle this
9244 		 * as part of commit.
9245 		 */
9246 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9247 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9248 			dc_stream_adjust_vmin_vmax(
9249 				dm->dc, acrtc_state->stream,
9250 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9251 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9252 		}
9253 		mutex_lock(&dm->dc_lock);
9254 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9255 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9256 			amdgpu_dm_psr_disable(acrtc_state->stream);
9257 
9258 		dc_commit_updates_for_stream(dm->dc,
9259 						     bundle->surface_updates,
9260 						     planes_count,
9261 						     acrtc_state->stream,
9262 						     &bundle->stream_update,
9263 						     dc_state);
9264 
9265 		/**
9266 		 * Enable or disable the interrupts on the backend.
9267 		 *
9268 		 * Most pipes are put into power gating when unused.
9269 		 *
9270 		 * When power gating is enabled on a pipe we lose the
9271 		 * interrupt enablement state when power gating is disabled.
9272 		 *
9273 		 * So we need to update the IRQ control state in hardware
9274 		 * whenever the pipe turns on (since it could be previously
9275 		 * power gated) or off (since some pipes can't be power gated
9276 		 * on some ASICs).
9277 		 */
9278 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9279 			dm_update_pflip_irq_state(drm_to_adev(dev),
9280 						  acrtc_attach);
9281 
9282 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9283 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9284 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9285 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9286 
9287 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9288 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9289 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9290 			struct amdgpu_dm_connector *aconn =
9291 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9292 
9293 			if (aconn->psr_skip_count > 0)
9294 				aconn->psr_skip_count--;
9295 
9296 			/* Allow PSR when skip count is 0. */
9297 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9298 		} else {
9299 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9300 		}
9301 
9302 		mutex_unlock(&dm->dc_lock);
9303 	}
9304 
9305 	/*
9306 	 * Update cursor state *after* programming all the planes.
9307 	 * This avoids redundant programming in the case where we're going
9308 	 * to be disabling a single plane - those pipes are being disabled.
9309 	 */
9310 	if (acrtc_state->active_planes)
9311 		amdgpu_dm_commit_cursors(state);
9312 
9313 cleanup:
9314 	kfree(bundle);
9315 }
9316 
9317 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9318 				   struct drm_atomic_state *state)
9319 {
9320 	struct amdgpu_device *adev = drm_to_adev(dev);
9321 	struct amdgpu_dm_connector *aconnector;
9322 	struct drm_connector *connector;
9323 	struct drm_connector_state *old_con_state, *new_con_state;
9324 	struct drm_crtc_state *new_crtc_state;
9325 	struct dm_crtc_state *new_dm_crtc_state;
9326 	const struct dc_stream_status *status;
9327 	int i, inst;
9328 
9329 	/* Notify device removals. */
9330 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9331 		if (old_con_state->crtc != new_con_state->crtc) {
9332 			/* CRTC changes require notification. */
9333 			goto notify;
9334 		}
9335 
9336 		if (!new_con_state->crtc)
9337 			continue;
9338 
9339 		new_crtc_state = drm_atomic_get_new_crtc_state(
9340 			state, new_con_state->crtc);
9341 
9342 		if (!new_crtc_state)
9343 			continue;
9344 
9345 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9346 			continue;
9347 
9348 	notify:
9349 		aconnector = to_amdgpu_dm_connector(connector);
9350 
9351 		mutex_lock(&adev->dm.audio_lock);
9352 		inst = aconnector->audio_inst;
9353 		aconnector->audio_inst = -1;
9354 		mutex_unlock(&adev->dm.audio_lock);
9355 
9356 		amdgpu_dm_audio_eld_notify(adev, inst);
9357 	}
9358 
9359 	/* Notify audio device additions. */
9360 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9361 		if (!new_con_state->crtc)
9362 			continue;
9363 
9364 		new_crtc_state = drm_atomic_get_new_crtc_state(
9365 			state, new_con_state->crtc);
9366 
9367 		if (!new_crtc_state)
9368 			continue;
9369 
9370 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9371 			continue;
9372 
9373 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9374 		if (!new_dm_crtc_state->stream)
9375 			continue;
9376 
9377 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9378 		if (!status)
9379 			continue;
9380 
9381 		aconnector = to_amdgpu_dm_connector(connector);
9382 
9383 		mutex_lock(&adev->dm.audio_lock);
9384 		inst = status->audio_inst;
9385 		aconnector->audio_inst = inst;
9386 		mutex_unlock(&adev->dm.audio_lock);
9387 
9388 		amdgpu_dm_audio_eld_notify(adev, inst);
9389 	}
9390 }
9391 
9392 /*
9393  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9394  * @crtc_state: the DRM CRTC state
9395  * @stream_state: the DC stream state.
9396  *
9397  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9398  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9399  */
9400 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9401 						struct dc_stream_state *stream_state)
9402 {
9403 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9404 }
9405 
9406 /**
9407  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9408  * @state: The atomic state to commit
9409  *
9410  * This will tell DC to commit the constructed DC state from atomic_check,
9411  * programming the hardware. Any failures here implies a hardware failure, since
9412  * atomic check should have filtered anything non-kosher.
9413  */
9414 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9415 {
9416 	struct drm_device *dev = state->dev;
9417 	struct amdgpu_device *adev = drm_to_adev(dev);
9418 	struct amdgpu_display_manager *dm = &adev->dm;
9419 	struct dm_atomic_state *dm_state;
9420 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9421 	uint32_t i, j;
9422 	struct drm_crtc *crtc;
9423 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9424 	unsigned long flags;
9425 	bool wait_for_vblank = true;
9426 	struct drm_connector *connector;
9427 	struct drm_connector_state *old_con_state, *new_con_state;
9428 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9429 	int crtc_disable_count = 0;
9430 	bool mode_set_reset_required = false;
9431 
9432 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9433 
9434 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9435 
9436 	dm_state = dm_atomic_get_new_state(state);
9437 	if (dm_state && dm_state->context) {
9438 		dc_state = dm_state->context;
9439 	} else {
9440 		/* No state changes, retain current state. */
9441 		dc_state_temp = dc_create_state(dm->dc);
9442 		ASSERT(dc_state_temp);
9443 		dc_state = dc_state_temp;
9444 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9445 	}
9446 
9447 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9448 				       new_crtc_state, i) {
9449 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9450 
9451 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9452 
9453 		if (old_crtc_state->active &&
9454 		    (!new_crtc_state->active ||
9455 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9456 			manage_dm_interrupts(adev, acrtc, false);
9457 			dc_stream_release(dm_old_crtc_state->stream);
9458 		}
9459 	}
9460 
9461 	drm_atomic_helper_calc_timestamping_constants(state);
9462 
9463 	/* update changed items */
9464 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9465 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9466 
9467 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9468 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9469 
9470 		DRM_DEBUG_ATOMIC(
9471 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9472 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9473 			"connectors_changed:%d\n",
9474 			acrtc->crtc_id,
9475 			new_crtc_state->enable,
9476 			new_crtc_state->active,
9477 			new_crtc_state->planes_changed,
9478 			new_crtc_state->mode_changed,
9479 			new_crtc_state->active_changed,
9480 			new_crtc_state->connectors_changed);
9481 
9482 		/* Disable cursor if disabling crtc */
9483 		if (old_crtc_state->active && !new_crtc_state->active) {
9484 			struct dc_cursor_position position;
9485 
9486 			memset(&position, 0, sizeof(position));
9487 			mutex_lock(&dm->dc_lock);
9488 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9489 			mutex_unlock(&dm->dc_lock);
9490 		}
9491 
9492 		/* Copy all transient state flags into dc state */
9493 		if (dm_new_crtc_state->stream) {
9494 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9495 							    dm_new_crtc_state->stream);
9496 		}
9497 
9498 		/* handles headless hotplug case, updating new_state and
9499 		 * aconnector as needed
9500 		 */
9501 
9502 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9503 
9504 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9505 
9506 			if (!dm_new_crtc_state->stream) {
9507 				/*
9508 				 * this could happen because of issues with
9509 				 * userspace notifications delivery.
9510 				 * In this case userspace tries to set mode on
9511 				 * display which is disconnected in fact.
9512 				 * dc_sink is NULL in this case on aconnector.
9513 				 * We expect reset mode will come soon.
9514 				 *
9515 				 * This can also happen when unplug is done
9516 				 * during resume sequence ended
9517 				 *
9518 				 * In this case, we want to pretend we still
9519 				 * have a sink to keep the pipe running so that
9520 				 * hw state is consistent with the sw state
9521 				 */
9522 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9523 						__func__, acrtc->base.base.id);
9524 				continue;
9525 			}
9526 
9527 			if (dm_old_crtc_state->stream)
9528 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9529 
9530 			pm_runtime_get_noresume(dev->dev);
9531 
9532 			acrtc->enabled = true;
9533 			acrtc->hw_mode = new_crtc_state->mode;
9534 			crtc->hwmode = new_crtc_state->mode;
9535 			mode_set_reset_required = true;
9536 		} else if (modereset_required(new_crtc_state)) {
9537 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9538 			/* i.e. reset mode */
9539 			if (dm_old_crtc_state->stream)
9540 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9541 
9542 			mode_set_reset_required = true;
9543 		}
9544 	} /* for_each_crtc_in_state() */
9545 
9546 	if (dc_state) {
9547 		/* if there mode set or reset, disable eDP PSR */
9548 		if (mode_set_reset_required) {
9549 #if defined(CONFIG_DRM_AMD_DC_DCN)
9550 			if (dm->vblank_control_workqueue)
9551 				flush_workqueue(dm->vblank_control_workqueue);
9552 #endif
9553 			amdgpu_dm_psr_disable_all(dm);
9554 		}
9555 
9556 		dm_enable_per_frame_crtc_master_sync(dc_state);
9557 		mutex_lock(&dm->dc_lock);
9558 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9559 #if defined(CONFIG_DRM_AMD_DC_DCN)
9560                /* Allow idle optimization when vblank count is 0 for display off */
9561                if (dm->active_vblank_irq_count == 0)
9562                    dc_allow_idle_optimizations(dm->dc,true);
9563 #endif
9564 		mutex_unlock(&dm->dc_lock);
9565 	}
9566 
9567 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9568 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9569 
9570 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9571 
9572 		if (dm_new_crtc_state->stream != NULL) {
9573 			const struct dc_stream_status *status =
9574 					dc_stream_get_status(dm_new_crtc_state->stream);
9575 
9576 			if (!status)
9577 				status = dc_stream_get_status_from_state(dc_state,
9578 									 dm_new_crtc_state->stream);
9579 			if (!status)
9580 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9581 			else
9582 				acrtc->otg_inst = status->primary_otg_inst;
9583 		}
9584 	}
9585 #ifdef CONFIG_DRM_AMD_DC_HDCP
9586 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9587 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9588 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9589 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9590 
9591 		new_crtc_state = NULL;
9592 
9593 		if (acrtc)
9594 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9595 
9596 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9597 
9598 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9599 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9600 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9601 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9602 			dm_new_con_state->update_hdcp = true;
9603 			continue;
9604 		}
9605 
9606 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9607 			hdcp_update_display(
9608 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9609 				new_con_state->hdcp_content_type,
9610 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9611 	}
9612 #endif
9613 
9614 	/* Handle connector state changes */
9615 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9616 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9617 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9618 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9619 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9620 		struct dc_stream_update stream_update;
9621 		struct dc_info_packet hdr_packet;
9622 		struct dc_stream_status *status = NULL;
9623 		bool abm_changed, hdr_changed, scaling_changed;
9624 
9625 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9626 		memset(&stream_update, 0, sizeof(stream_update));
9627 
9628 		if (acrtc) {
9629 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9630 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9631 		}
9632 
9633 		/* Skip any modesets/resets */
9634 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9635 			continue;
9636 
9637 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9638 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9639 
9640 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9641 							     dm_old_con_state);
9642 
9643 		abm_changed = dm_new_crtc_state->abm_level !=
9644 			      dm_old_crtc_state->abm_level;
9645 
9646 		hdr_changed =
9647 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9648 
9649 		if (!scaling_changed && !abm_changed && !hdr_changed)
9650 			continue;
9651 
9652 		stream_update.stream = dm_new_crtc_state->stream;
9653 		if (scaling_changed) {
9654 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9655 					dm_new_con_state, dm_new_crtc_state->stream);
9656 
9657 			stream_update.src = dm_new_crtc_state->stream->src;
9658 			stream_update.dst = dm_new_crtc_state->stream->dst;
9659 		}
9660 
9661 		if (abm_changed) {
9662 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9663 
9664 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9665 		}
9666 
9667 		if (hdr_changed) {
9668 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9669 			stream_update.hdr_static_metadata = &hdr_packet;
9670 		}
9671 
9672 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9673 
9674 		if (WARN_ON(!status))
9675 			continue;
9676 
9677 		WARN_ON(!status->plane_count);
9678 
9679 		/*
9680 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9681 		 * Here we create an empty update on each plane.
9682 		 * To fix this, DC should permit updating only stream properties.
9683 		 */
9684 		for (j = 0; j < status->plane_count; j++)
9685 			dummy_updates[j].surface = status->plane_states[0];
9686 
9687 
9688 		mutex_lock(&dm->dc_lock);
9689 		dc_commit_updates_for_stream(dm->dc,
9690 						     dummy_updates,
9691 						     status->plane_count,
9692 						     dm_new_crtc_state->stream,
9693 						     &stream_update,
9694 						     dc_state);
9695 		mutex_unlock(&dm->dc_lock);
9696 	}
9697 
9698 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9699 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9700 				      new_crtc_state, i) {
9701 		if (old_crtc_state->active && !new_crtc_state->active)
9702 			crtc_disable_count++;
9703 
9704 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9705 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9706 
9707 		/* For freesync config update on crtc state and params for irq */
9708 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9709 
9710 		/* Handle vrr on->off / off->on transitions */
9711 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9712 						dm_new_crtc_state);
9713 	}
9714 
9715 	/**
9716 	 * Enable interrupts for CRTCs that are newly enabled or went through
9717 	 * a modeset. It was intentionally deferred until after the front end
9718 	 * state was modified to wait until the OTG was on and so the IRQ
9719 	 * handlers didn't access stale or invalid state.
9720 	 */
9721 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9722 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9723 #ifdef CONFIG_DEBUG_FS
9724 		bool configure_crc = false;
9725 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9726 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9727 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9728 #endif
9729 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9730 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9731 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9732 #endif
9733 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9734 
9735 		if (new_crtc_state->active &&
9736 		    (!old_crtc_state->active ||
9737 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9738 			dc_stream_retain(dm_new_crtc_state->stream);
9739 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9740 			manage_dm_interrupts(adev, acrtc, true);
9741 
9742 #ifdef CONFIG_DEBUG_FS
9743 			/**
9744 			 * Frontend may have changed so reapply the CRC capture
9745 			 * settings for the stream.
9746 			 */
9747 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9748 
9749 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9750 				configure_crc = true;
9751 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9752 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9753 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9754 					acrtc->dm_irq_params.crc_window.update_win = true;
9755 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9756 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9757 					crc_rd_wrk->crtc = crtc;
9758 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9759 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9760 				}
9761 #endif
9762 			}
9763 
9764 			if (configure_crc)
9765 				if (amdgpu_dm_crtc_configure_crc_source(
9766 					crtc, dm_new_crtc_state, cur_crc_src))
9767 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9768 #endif
9769 		}
9770 	}
9771 
9772 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9773 		if (new_crtc_state->async_flip)
9774 			wait_for_vblank = false;
9775 
9776 	/* update planes when needed per crtc*/
9777 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9778 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9779 
9780 		if (dm_new_crtc_state->stream)
9781 			amdgpu_dm_commit_planes(state, dc_state, dev,
9782 						dm, crtc, wait_for_vblank);
9783 	}
9784 
9785 	/* Update audio instances for each connector. */
9786 	amdgpu_dm_commit_audio(dev, state);
9787 
9788 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9789 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9790 	/* restore the backlight level */
9791 	for (i = 0; i < dm->num_of_edps; i++) {
9792 		if (dm->backlight_dev[i] &&
9793 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9794 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9795 	}
9796 #endif
9797 	/*
9798 	 * send vblank event on all events not handled in flip and
9799 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9800 	 */
9801 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9802 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9803 
9804 		if (new_crtc_state->event)
9805 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9806 
9807 		new_crtc_state->event = NULL;
9808 	}
9809 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9810 
9811 	/* Signal HW programming completion */
9812 	drm_atomic_helper_commit_hw_done(state);
9813 
9814 	if (wait_for_vblank)
9815 		drm_atomic_helper_wait_for_flip_done(dev, state);
9816 
9817 	drm_atomic_helper_cleanup_planes(dev, state);
9818 
9819 	/* return the stolen vga memory back to VRAM */
9820 	if (!adev->mman.keep_stolen_vga_memory)
9821 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9822 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9823 
9824 	/*
9825 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9826 	 * so we can put the GPU into runtime suspend if we're not driving any
9827 	 * displays anymore
9828 	 */
9829 	for (i = 0; i < crtc_disable_count; i++)
9830 		pm_runtime_put_autosuspend(dev->dev);
9831 	pm_runtime_mark_last_busy(dev->dev);
9832 
9833 	if (dc_state_temp)
9834 		dc_release_state(dc_state_temp);
9835 }
9836 
9837 
9838 static int dm_force_atomic_commit(struct drm_connector *connector)
9839 {
9840 	int ret = 0;
9841 	struct drm_device *ddev = connector->dev;
9842 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9843 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9844 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9845 	struct drm_connector_state *conn_state;
9846 	struct drm_crtc_state *crtc_state;
9847 	struct drm_plane_state *plane_state;
9848 
9849 	if (!state)
9850 		return -ENOMEM;
9851 
9852 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9853 
9854 	/* Construct an atomic state to restore previous display setting */
9855 
9856 	/*
9857 	 * Attach connectors to drm_atomic_state
9858 	 */
9859 	conn_state = drm_atomic_get_connector_state(state, connector);
9860 
9861 	ret = PTR_ERR_OR_ZERO(conn_state);
9862 	if (ret)
9863 		goto out;
9864 
9865 	/* Attach crtc to drm_atomic_state*/
9866 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9867 
9868 	ret = PTR_ERR_OR_ZERO(crtc_state);
9869 	if (ret)
9870 		goto out;
9871 
9872 	/* force a restore */
9873 	crtc_state->mode_changed = true;
9874 
9875 	/* Attach plane to drm_atomic_state */
9876 	plane_state = drm_atomic_get_plane_state(state, plane);
9877 
9878 	ret = PTR_ERR_OR_ZERO(plane_state);
9879 	if (ret)
9880 		goto out;
9881 
9882 	/* Call commit internally with the state we just constructed */
9883 	ret = drm_atomic_commit(state);
9884 
9885 out:
9886 	drm_atomic_state_put(state);
9887 	if (ret)
9888 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9889 
9890 	return ret;
9891 }
9892 
9893 /*
9894  * This function handles all cases when set mode does not come upon hotplug.
9895  * This includes when a display is unplugged then plugged back into the
9896  * same port and when running without usermode desktop manager supprot
9897  */
9898 void dm_restore_drm_connector_state(struct drm_device *dev,
9899 				    struct drm_connector *connector)
9900 {
9901 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9902 	struct amdgpu_crtc *disconnected_acrtc;
9903 	struct dm_crtc_state *acrtc_state;
9904 
9905 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9906 		return;
9907 
9908 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9909 	if (!disconnected_acrtc)
9910 		return;
9911 
9912 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9913 	if (!acrtc_state->stream)
9914 		return;
9915 
9916 	/*
9917 	 * If the previous sink is not released and different from the current,
9918 	 * we deduce we are in a state where we can not rely on usermode call
9919 	 * to turn on the display, so we do it here
9920 	 */
9921 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9922 		dm_force_atomic_commit(&aconnector->base);
9923 }
9924 
9925 /*
9926  * Grabs all modesetting locks to serialize against any blocking commits,
9927  * Waits for completion of all non blocking commits.
9928  */
9929 static int do_aquire_global_lock(struct drm_device *dev,
9930 				 struct drm_atomic_state *state)
9931 {
9932 	struct drm_crtc *crtc;
9933 	struct drm_crtc_commit *commit;
9934 	long ret;
9935 
9936 	/*
9937 	 * Adding all modeset locks to aquire_ctx will
9938 	 * ensure that when the framework release it the
9939 	 * extra locks we are locking here will get released to
9940 	 */
9941 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9942 	if (ret)
9943 		return ret;
9944 
9945 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9946 		spin_lock(&crtc->commit_lock);
9947 		commit = list_first_entry_or_null(&crtc->commit_list,
9948 				struct drm_crtc_commit, commit_entry);
9949 		if (commit)
9950 			drm_crtc_commit_get(commit);
9951 		spin_unlock(&crtc->commit_lock);
9952 
9953 		if (!commit)
9954 			continue;
9955 
9956 		/*
9957 		 * Make sure all pending HW programming completed and
9958 		 * page flips done
9959 		 */
9960 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9961 
9962 		if (ret > 0)
9963 			ret = wait_for_completion_interruptible_timeout(
9964 					&commit->flip_done, 10*HZ);
9965 
9966 		if (ret == 0)
9967 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9968 				  "timed out\n", crtc->base.id, crtc->name);
9969 
9970 		drm_crtc_commit_put(commit);
9971 	}
9972 
9973 	return ret < 0 ? ret : 0;
9974 }
9975 
9976 static void get_freesync_config_for_crtc(
9977 	struct dm_crtc_state *new_crtc_state,
9978 	struct dm_connector_state *new_con_state)
9979 {
9980 	struct mod_freesync_config config = {0};
9981 	struct amdgpu_dm_connector *aconnector =
9982 			to_amdgpu_dm_connector(new_con_state->base.connector);
9983 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9984 	int vrefresh = drm_mode_vrefresh(mode);
9985 	bool fs_vid_mode = false;
9986 
9987 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9988 					vrefresh >= aconnector->min_vfreq &&
9989 					vrefresh <= aconnector->max_vfreq;
9990 
9991 	if (new_crtc_state->vrr_supported) {
9992 		new_crtc_state->stream->ignore_msa_timing_param = true;
9993 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9994 
9995 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9996 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9997 		config.vsif_supported = true;
9998 		config.btr = true;
9999 
10000 		if (fs_vid_mode) {
10001 			config.state = VRR_STATE_ACTIVE_FIXED;
10002 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10003 			goto out;
10004 		} else if (new_crtc_state->base.vrr_enabled) {
10005 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10006 		} else {
10007 			config.state = VRR_STATE_INACTIVE;
10008 		}
10009 	}
10010 out:
10011 	new_crtc_state->freesync_config = config;
10012 }
10013 
10014 static void reset_freesync_config_for_crtc(
10015 	struct dm_crtc_state *new_crtc_state)
10016 {
10017 	new_crtc_state->vrr_supported = false;
10018 
10019 	memset(&new_crtc_state->vrr_infopacket, 0,
10020 	       sizeof(new_crtc_state->vrr_infopacket));
10021 }
10022 
10023 static bool
10024 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10025 				 struct drm_crtc_state *new_crtc_state)
10026 {
10027 	struct drm_display_mode old_mode, new_mode;
10028 
10029 	if (!old_crtc_state || !new_crtc_state)
10030 		return false;
10031 
10032 	old_mode = old_crtc_state->mode;
10033 	new_mode = new_crtc_state->mode;
10034 
10035 	if (old_mode.clock       == new_mode.clock &&
10036 	    old_mode.hdisplay    == new_mode.hdisplay &&
10037 	    old_mode.vdisplay    == new_mode.vdisplay &&
10038 	    old_mode.htotal      == new_mode.htotal &&
10039 	    old_mode.vtotal      != new_mode.vtotal &&
10040 	    old_mode.hsync_start == new_mode.hsync_start &&
10041 	    old_mode.vsync_start != new_mode.vsync_start &&
10042 	    old_mode.hsync_end   == new_mode.hsync_end &&
10043 	    old_mode.vsync_end   != new_mode.vsync_end &&
10044 	    old_mode.hskew       == new_mode.hskew &&
10045 	    old_mode.vscan       == new_mode.vscan &&
10046 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10047 	    (new_mode.vsync_end - new_mode.vsync_start))
10048 		return true;
10049 
10050 	return false;
10051 }
10052 
10053 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10054 	uint64_t num, den, res;
10055 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10056 
10057 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10058 
10059 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10060 	den = (unsigned long long)new_crtc_state->mode.htotal *
10061 	      (unsigned long long)new_crtc_state->mode.vtotal;
10062 
10063 	res = div_u64(num, den);
10064 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10065 }
10066 
10067 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10068 				struct drm_atomic_state *state,
10069 				struct drm_crtc *crtc,
10070 				struct drm_crtc_state *old_crtc_state,
10071 				struct drm_crtc_state *new_crtc_state,
10072 				bool enable,
10073 				bool *lock_and_validation_needed)
10074 {
10075 	struct dm_atomic_state *dm_state = NULL;
10076 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10077 	struct dc_stream_state *new_stream;
10078 	int ret = 0;
10079 
10080 	/*
10081 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10082 	 * update changed items
10083 	 */
10084 	struct amdgpu_crtc *acrtc = NULL;
10085 	struct amdgpu_dm_connector *aconnector = NULL;
10086 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10087 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10088 
10089 	new_stream = NULL;
10090 
10091 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10092 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10093 	acrtc = to_amdgpu_crtc(crtc);
10094 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10095 
10096 	/* TODO This hack should go away */
10097 	if (aconnector && enable) {
10098 		/* Make sure fake sink is created in plug-in scenario */
10099 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10100 							    &aconnector->base);
10101 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10102 							    &aconnector->base);
10103 
10104 		if (IS_ERR(drm_new_conn_state)) {
10105 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10106 			goto fail;
10107 		}
10108 
10109 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10110 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10111 
10112 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10113 			goto skip_modeset;
10114 
10115 		new_stream = create_validate_stream_for_sink(aconnector,
10116 							     &new_crtc_state->mode,
10117 							     dm_new_conn_state,
10118 							     dm_old_crtc_state->stream);
10119 
10120 		/*
10121 		 * we can have no stream on ACTION_SET if a display
10122 		 * was disconnected during S3, in this case it is not an
10123 		 * error, the OS will be updated after detection, and
10124 		 * will do the right thing on next atomic commit
10125 		 */
10126 
10127 		if (!new_stream) {
10128 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10129 					__func__, acrtc->base.base.id);
10130 			ret = -ENOMEM;
10131 			goto fail;
10132 		}
10133 
10134 		/*
10135 		 * TODO: Check VSDB bits to decide whether this should
10136 		 * be enabled or not.
10137 		 */
10138 		new_stream->triggered_crtc_reset.enabled =
10139 			dm->force_timing_sync;
10140 
10141 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10142 
10143 		ret = fill_hdr_info_packet(drm_new_conn_state,
10144 					   &new_stream->hdr_static_metadata);
10145 		if (ret)
10146 			goto fail;
10147 
10148 		/*
10149 		 * If we already removed the old stream from the context
10150 		 * (and set the new stream to NULL) then we can't reuse
10151 		 * the old stream even if the stream and scaling are unchanged.
10152 		 * We'll hit the BUG_ON and black screen.
10153 		 *
10154 		 * TODO: Refactor this function to allow this check to work
10155 		 * in all conditions.
10156 		 */
10157 		if (amdgpu_freesync_vid_mode &&
10158 		    dm_new_crtc_state->stream &&
10159 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10160 			goto skip_modeset;
10161 
10162 		if (dm_new_crtc_state->stream &&
10163 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10164 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10165 			new_crtc_state->mode_changed = false;
10166 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10167 					 new_crtc_state->mode_changed);
10168 		}
10169 	}
10170 
10171 	/* mode_changed flag may get updated above, need to check again */
10172 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10173 		goto skip_modeset;
10174 
10175 	DRM_DEBUG_ATOMIC(
10176 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10177 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10178 		"connectors_changed:%d\n",
10179 		acrtc->crtc_id,
10180 		new_crtc_state->enable,
10181 		new_crtc_state->active,
10182 		new_crtc_state->planes_changed,
10183 		new_crtc_state->mode_changed,
10184 		new_crtc_state->active_changed,
10185 		new_crtc_state->connectors_changed);
10186 
10187 	/* Remove stream for any changed/disabled CRTC */
10188 	if (!enable) {
10189 
10190 		if (!dm_old_crtc_state->stream)
10191 			goto skip_modeset;
10192 
10193 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10194 		    is_timing_unchanged_for_freesync(new_crtc_state,
10195 						     old_crtc_state)) {
10196 			new_crtc_state->mode_changed = false;
10197 			DRM_DEBUG_DRIVER(
10198 				"Mode change not required for front porch change, "
10199 				"setting mode_changed to %d",
10200 				new_crtc_state->mode_changed);
10201 
10202 			set_freesync_fixed_config(dm_new_crtc_state);
10203 
10204 			goto skip_modeset;
10205 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10206 			   is_freesync_video_mode(&new_crtc_state->mode,
10207 						  aconnector)) {
10208 			struct drm_display_mode *high_mode;
10209 
10210 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10211 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10212 				set_freesync_fixed_config(dm_new_crtc_state);
10213 			}
10214 		}
10215 
10216 		ret = dm_atomic_get_state(state, &dm_state);
10217 		if (ret)
10218 			goto fail;
10219 
10220 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10221 				crtc->base.id);
10222 
10223 		/* i.e. reset mode */
10224 		if (dc_remove_stream_from_ctx(
10225 				dm->dc,
10226 				dm_state->context,
10227 				dm_old_crtc_state->stream) != DC_OK) {
10228 			ret = -EINVAL;
10229 			goto fail;
10230 		}
10231 
10232 		dc_stream_release(dm_old_crtc_state->stream);
10233 		dm_new_crtc_state->stream = NULL;
10234 
10235 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10236 
10237 		*lock_and_validation_needed = true;
10238 
10239 	} else {/* Add stream for any updated/enabled CRTC */
10240 		/*
10241 		 * Quick fix to prevent NULL pointer on new_stream when
10242 		 * added MST connectors not found in existing crtc_state in the chained mode
10243 		 * TODO: need to dig out the root cause of that
10244 		 */
10245 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10246 			goto skip_modeset;
10247 
10248 		if (modereset_required(new_crtc_state))
10249 			goto skip_modeset;
10250 
10251 		if (modeset_required(new_crtc_state, new_stream,
10252 				     dm_old_crtc_state->stream)) {
10253 
10254 			WARN_ON(dm_new_crtc_state->stream);
10255 
10256 			ret = dm_atomic_get_state(state, &dm_state);
10257 			if (ret)
10258 				goto fail;
10259 
10260 			dm_new_crtc_state->stream = new_stream;
10261 
10262 			dc_stream_retain(new_stream);
10263 
10264 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10265 					 crtc->base.id);
10266 
10267 			if (dc_add_stream_to_ctx(
10268 					dm->dc,
10269 					dm_state->context,
10270 					dm_new_crtc_state->stream) != DC_OK) {
10271 				ret = -EINVAL;
10272 				goto fail;
10273 			}
10274 
10275 			*lock_and_validation_needed = true;
10276 		}
10277 	}
10278 
10279 skip_modeset:
10280 	/* Release extra reference */
10281 	if (new_stream)
10282 		 dc_stream_release(new_stream);
10283 
10284 	/*
10285 	 * We want to do dc stream updates that do not require a
10286 	 * full modeset below.
10287 	 */
10288 	if (!(enable && aconnector && new_crtc_state->active))
10289 		return 0;
10290 	/*
10291 	 * Given above conditions, the dc state cannot be NULL because:
10292 	 * 1. We're in the process of enabling CRTCs (just been added
10293 	 *    to the dc context, or already is on the context)
10294 	 * 2. Has a valid connector attached, and
10295 	 * 3. Is currently active and enabled.
10296 	 * => The dc stream state currently exists.
10297 	 */
10298 	BUG_ON(dm_new_crtc_state->stream == NULL);
10299 
10300 	/* Scaling or underscan settings */
10301 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10302 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10303 		update_stream_scaling_settings(
10304 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10305 
10306 	/* ABM settings */
10307 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10308 
10309 	/*
10310 	 * Color management settings. We also update color properties
10311 	 * when a modeset is needed, to ensure it gets reprogrammed.
10312 	 */
10313 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10314 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10315 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10316 		if (ret)
10317 			goto fail;
10318 	}
10319 
10320 	/* Update Freesync settings. */
10321 	get_freesync_config_for_crtc(dm_new_crtc_state,
10322 				     dm_new_conn_state);
10323 
10324 	return ret;
10325 
10326 fail:
10327 	if (new_stream)
10328 		dc_stream_release(new_stream);
10329 	return ret;
10330 }
10331 
10332 static bool should_reset_plane(struct drm_atomic_state *state,
10333 			       struct drm_plane *plane,
10334 			       struct drm_plane_state *old_plane_state,
10335 			       struct drm_plane_state *new_plane_state)
10336 {
10337 	struct drm_plane *other;
10338 	struct drm_plane_state *old_other_state, *new_other_state;
10339 	struct drm_crtc_state *new_crtc_state;
10340 	int i;
10341 
10342 	/*
10343 	 * TODO: Remove this hack once the checks below are sufficient
10344 	 * enough to determine when we need to reset all the planes on
10345 	 * the stream.
10346 	 */
10347 	if (state->allow_modeset)
10348 		return true;
10349 
10350 	/* Exit early if we know that we're adding or removing the plane. */
10351 	if (old_plane_state->crtc != new_plane_state->crtc)
10352 		return true;
10353 
10354 	/* old crtc == new_crtc == NULL, plane not in context. */
10355 	if (!new_plane_state->crtc)
10356 		return false;
10357 
10358 	new_crtc_state =
10359 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10360 
10361 	if (!new_crtc_state)
10362 		return true;
10363 
10364 	/* CRTC Degamma changes currently require us to recreate planes. */
10365 	if (new_crtc_state->color_mgmt_changed)
10366 		return true;
10367 
10368 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10369 		return true;
10370 
10371 	/*
10372 	 * If there are any new primary or overlay planes being added or
10373 	 * removed then the z-order can potentially change. To ensure
10374 	 * correct z-order and pipe acquisition the current DC architecture
10375 	 * requires us to remove and recreate all existing planes.
10376 	 *
10377 	 * TODO: Come up with a more elegant solution for this.
10378 	 */
10379 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10380 		struct amdgpu_framebuffer *old_afb, *new_afb;
10381 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10382 			continue;
10383 
10384 		if (old_other_state->crtc != new_plane_state->crtc &&
10385 		    new_other_state->crtc != new_plane_state->crtc)
10386 			continue;
10387 
10388 		if (old_other_state->crtc != new_other_state->crtc)
10389 			return true;
10390 
10391 		/* Src/dst size and scaling updates. */
10392 		if (old_other_state->src_w != new_other_state->src_w ||
10393 		    old_other_state->src_h != new_other_state->src_h ||
10394 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10395 		    old_other_state->crtc_h != new_other_state->crtc_h)
10396 			return true;
10397 
10398 		/* Rotation / mirroring updates. */
10399 		if (old_other_state->rotation != new_other_state->rotation)
10400 			return true;
10401 
10402 		/* Blending updates. */
10403 		if (old_other_state->pixel_blend_mode !=
10404 		    new_other_state->pixel_blend_mode)
10405 			return true;
10406 
10407 		/* Alpha updates. */
10408 		if (old_other_state->alpha != new_other_state->alpha)
10409 			return true;
10410 
10411 		/* Colorspace changes. */
10412 		if (old_other_state->color_range != new_other_state->color_range ||
10413 		    old_other_state->color_encoding != new_other_state->color_encoding)
10414 			return true;
10415 
10416 		/* Framebuffer checks fall at the end. */
10417 		if (!old_other_state->fb || !new_other_state->fb)
10418 			continue;
10419 
10420 		/* Pixel format changes can require bandwidth updates. */
10421 		if (old_other_state->fb->format != new_other_state->fb->format)
10422 			return true;
10423 
10424 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10425 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10426 
10427 		/* Tiling and DCC changes also require bandwidth updates. */
10428 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10429 		    old_afb->base.modifier != new_afb->base.modifier)
10430 			return true;
10431 	}
10432 
10433 	return false;
10434 }
10435 
10436 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10437 			      struct drm_plane_state *new_plane_state,
10438 			      struct drm_framebuffer *fb)
10439 {
10440 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10441 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10442 	unsigned int pitch;
10443 	bool linear;
10444 
10445 	if (fb->width > new_acrtc->max_cursor_width ||
10446 	    fb->height > new_acrtc->max_cursor_height) {
10447 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10448 				 new_plane_state->fb->width,
10449 				 new_plane_state->fb->height);
10450 		return -EINVAL;
10451 	}
10452 	if (new_plane_state->src_w != fb->width << 16 ||
10453 	    new_plane_state->src_h != fb->height << 16) {
10454 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10455 		return -EINVAL;
10456 	}
10457 
10458 	/* Pitch in pixels */
10459 	pitch = fb->pitches[0] / fb->format->cpp[0];
10460 
10461 	if (fb->width != pitch) {
10462 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10463 				 fb->width, pitch);
10464 		return -EINVAL;
10465 	}
10466 
10467 	switch (pitch) {
10468 	case 64:
10469 	case 128:
10470 	case 256:
10471 		/* FB pitch is supported by cursor plane */
10472 		break;
10473 	default:
10474 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10475 		return -EINVAL;
10476 	}
10477 
10478 	/* Core DRM takes care of checking FB modifiers, so we only need to
10479 	 * check tiling flags when the FB doesn't have a modifier. */
10480 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10481 		if (adev->family < AMDGPU_FAMILY_AI) {
10482 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10483 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10484 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10485 		} else {
10486 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10487 		}
10488 		if (!linear) {
10489 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10490 			return -EINVAL;
10491 		}
10492 	}
10493 
10494 	return 0;
10495 }
10496 
10497 static int dm_update_plane_state(struct dc *dc,
10498 				 struct drm_atomic_state *state,
10499 				 struct drm_plane *plane,
10500 				 struct drm_plane_state *old_plane_state,
10501 				 struct drm_plane_state *new_plane_state,
10502 				 bool enable,
10503 				 bool *lock_and_validation_needed)
10504 {
10505 
10506 	struct dm_atomic_state *dm_state = NULL;
10507 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10508 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10509 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10510 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10511 	struct amdgpu_crtc *new_acrtc;
10512 	bool needs_reset;
10513 	int ret = 0;
10514 
10515 
10516 	new_plane_crtc = new_plane_state->crtc;
10517 	old_plane_crtc = old_plane_state->crtc;
10518 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10519 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10520 
10521 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10522 		if (!enable || !new_plane_crtc ||
10523 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10524 			return 0;
10525 
10526 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10527 
10528 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10529 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10530 			return -EINVAL;
10531 		}
10532 
10533 		if (new_plane_state->fb) {
10534 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10535 						 new_plane_state->fb);
10536 			if (ret)
10537 				return ret;
10538 		}
10539 
10540 		return 0;
10541 	}
10542 
10543 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10544 					 new_plane_state);
10545 
10546 	/* Remove any changed/removed planes */
10547 	if (!enable) {
10548 		if (!needs_reset)
10549 			return 0;
10550 
10551 		if (!old_plane_crtc)
10552 			return 0;
10553 
10554 		old_crtc_state = drm_atomic_get_old_crtc_state(
10555 				state, old_plane_crtc);
10556 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10557 
10558 		if (!dm_old_crtc_state->stream)
10559 			return 0;
10560 
10561 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10562 				plane->base.id, old_plane_crtc->base.id);
10563 
10564 		ret = dm_atomic_get_state(state, &dm_state);
10565 		if (ret)
10566 			return ret;
10567 
10568 		if (!dc_remove_plane_from_context(
10569 				dc,
10570 				dm_old_crtc_state->stream,
10571 				dm_old_plane_state->dc_state,
10572 				dm_state->context)) {
10573 
10574 			return -EINVAL;
10575 		}
10576 
10577 
10578 		dc_plane_state_release(dm_old_plane_state->dc_state);
10579 		dm_new_plane_state->dc_state = NULL;
10580 
10581 		*lock_and_validation_needed = true;
10582 
10583 	} else { /* Add new planes */
10584 		struct dc_plane_state *dc_new_plane_state;
10585 
10586 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10587 			return 0;
10588 
10589 		if (!new_plane_crtc)
10590 			return 0;
10591 
10592 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10593 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10594 
10595 		if (!dm_new_crtc_state->stream)
10596 			return 0;
10597 
10598 		if (!needs_reset)
10599 			return 0;
10600 
10601 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10602 		if (ret)
10603 			return ret;
10604 
10605 		WARN_ON(dm_new_plane_state->dc_state);
10606 
10607 		dc_new_plane_state = dc_create_plane_state(dc);
10608 		if (!dc_new_plane_state)
10609 			return -ENOMEM;
10610 
10611 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10612 				 plane->base.id, new_plane_crtc->base.id);
10613 
10614 		ret = fill_dc_plane_attributes(
10615 			drm_to_adev(new_plane_crtc->dev),
10616 			dc_new_plane_state,
10617 			new_plane_state,
10618 			new_crtc_state);
10619 		if (ret) {
10620 			dc_plane_state_release(dc_new_plane_state);
10621 			return ret;
10622 		}
10623 
10624 		ret = dm_atomic_get_state(state, &dm_state);
10625 		if (ret) {
10626 			dc_plane_state_release(dc_new_plane_state);
10627 			return ret;
10628 		}
10629 
10630 		/*
10631 		 * Any atomic check errors that occur after this will
10632 		 * not need a release. The plane state will be attached
10633 		 * to the stream, and therefore part of the atomic
10634 		 * state. It'll be released when the atomic state is
10635 		 * cleaned.
10636 		 */
10637 		if (!dc_add_plane_to_context(
10638 				dc,
10639 				dm_new_crtc_state->stream,
10640 				dc_new_plane_state,
10641 				dm_state->context)) {
10642 
10643 			dc_plane_state_release(dc_new_plane_state);
10644 			return -EINVAL;
10645 		}
10646 
10647 		dm_new_plane_state->dc_state = dc_new_plane_state;
10648 
10649 		/* Tell DC to do a full surface update every time there
10650 		 * is a plane change. Inefficient, but works for now.
10651 		 */
10652 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10653 
10654 		*lock_and_validation_needed = true;
10655 	}
10656 
10657 
10658 	return ret;
10659 }
10660 
10661 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10662 				struct drm_crtc *crtc,
10663 				struct drm_crtc_state *new_crtc_state)
10664 {
10665 	struct drm_plane *cursor = crtc->cursor, *underlying;
10666 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10667 	int i;
10668 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10669 
10670 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10671 	 * cursor per pipe but it's going to inherit the scaling and
10672 	 * positioning from the underlying pipe. Check the cursor plane's
10673 	 * blending properties match the underlying planes'. */
10674 
10675 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10676 	if (!new_cursor_state || !new_cursor_state->fb) {
10677 		return 0;
10678 	}
10679 
10680 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10681 			 (new_cursor_state->src_w >> 16);
10682 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10683 			 (new_cursor_state->src_h >> 16);
10684 
10685 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10686 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10687 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10688 			continue;
10689 
10690 		/* Ignore disabled planes */
10691 		if (!new_underlying_state->fb)
10692 			continue;
10693 
10694 		underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10695 				     (new_underlying_state->src_w >> 16);
10696 		underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10697 				     (new_underlying_state->src_h >> 16);
10698 
10699 		if (cursor_scale_w != underlying_scale_w ||
10700 		    cursor_scale_h != underlying_scale_h) {
10701 			drm_dbg_atomic(crtc->dev,
10702 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10703 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10704 			return -EINVAL;
10705 		}
10706 
10707 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10708 		if (new_underlying_state->crtc_x <= 0 &&
10709 		    new_underlying_state->crtc_y <= 0 &&
10710 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10711 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10712 			break;
10713 	}
10714 
10715 	return 0;
10716 }
10717 
10718 #if defined(CONFIG_DRM_AMD_DC_DCN)
10719 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10720 {
10721 	struct drm_connector *connector;
10722 	struct drm_connector_state *conn_state;
10723 	struct amdgpu_dm_connector *aconnector = NULL;
10724 	int i;
10725 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10726 		if (conn_state->crtc != crtc)
10727 			continue;
10728 
10729 		aconnector = to_amdgpu_dm_connector(connector);
10730 		if (!aconnector->port || !aconnector->mst_port)
10731 			aconnector = NULL;
10732 		else
10733 			break;
10734 	}
10735 
10736 	if (!aconnector)
10737 		return 0;
10738 
10739 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10740 }
10741 #endif
10742 
10743 /**
10744  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10745  * @dev: The DRM device
10746  * @state: The atomic state to commit
10747  *
10748  * Validate that the given atomic state is programmable by DC into hardware.
10749  * This involves constructing a &struct dc_state reflecting the new hardware
10750  * state we wish to commit, then querying DC to see if it is programmable. It's
10751  * important not to modify the existing DC state. Otherwise, atomic_check
10752  * may unexpectedly commit hardware changes.
10753  *
10754  * When validating the DC state, it's important that the right locks are
10755  * acquired. For full updates case which removes/adds/updates streams on one
10756  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10757  * that any such full update commit will wait for completion of any outstanding
10758  * flip using DRMs synchronization events.
10759  *
10760  * Note that DM adds the affected connectors for all CRTCs in state, when that
10761  * might not seem necessary. This is because DC stream creation requires the
10762  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10763  * be possible but non-trivial - a possible TODO item.
10764  *
10765  * Return: -Error code if validation failed.
10766  */
10767 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10768 				  struct drm_atomic_state *state)
10769 {
10770 	struct amdgpu_device *adev = drm_to_adev(dev);
10771 	struct dm_atomic_state *dm_state = NULL;
10772 	struct dc *dc = adev->dm.dc;
10773 	struct drm_connector *connector;
10774 	struct drm_connector_state *old_con_state, *new_con_state;
10775 	struct drm_crtc *crtc;
10776 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10777 	struct drm_plane *plane;
10778 	struct drm_plane_state *old_plane_state, *new_plane_state;
10779 	enum dc_status status;
10780 	int ret, i;
10781 	bool lock_and_validation_needed = false;
10782 	struct dm_crtc_state *dm_old_crtc_state;
10783 #if defined(CONFIG_DRM_AMD_DC_DCN)
10784 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10785 	struct drm_dp_mst_topology_state *mst_state;
10786 	struct drm_dp_mst_topology_mgr *mgr;
10787 #endif
10788 
10789 	trace_amdgpu_dm_atomic_check_begin(state);
10790 
10791 	ret = drm_atomic_helper_check_modeset(dev, state);
10792 	if (ret)
10793 		goto fail;
10794 
10795 	/* Check connector changes */
10796 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10797 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10798 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10799 
10800 		/* Skip connectors that are disabled or part of modeset already. */
10801 		if (!old_con_state->crtc && !new_con_state->crtc)
10802 			continue;
10803 
10804 		if (!new_con_state->crtc)
10805 			continue;
10806 
10807 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10808 		if (IS_ERR(new_crtc_state)) {
10809 			ret = PTR_ERR(new_crtc_state);
10810 			goto fail;
10811 		}
10812 
10813 		if (dm_old_con_state->abm_level !=
10814 		    dm_new_con_state->abm_level)
10815 			new_crtc_state->connectors_changed = true;
10816 	}
10817 
10818 #if defined(CONFIG_DRM_AMD_DC_DCN)
10819 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10820 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10821 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10822 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10823 				if (ret)
10824 					goto fail;
10825 			}
10826 		}
10827 	}
10828 #endif
10829 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10830 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10831 
10832 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10833 		    !new_crtc_state->color_mgmt_changed &&
10834 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10835 			dm_old_crtc_state->dsc_force_changed == false)
10836 			continue;
10837 
10838 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10839 		if (ret)
10840 			goto fail;
10841 
10842 		if (!new_crtc_state->enable)
10843 			continue;
10844 
10845 		ret = drm_atomic_add_affected_connectors(state, crtc);
10846 		if (ret)
10847 			goto fail;
10848 
10849 		ret = drm_atomic_add_affected_planes(state, crtc);
10850 		if (ret)
10851 			goto fail;
10852 
10853 		if (dm_old_crtc_state->dsc_force_changed)
10854 			new_crtc_state->mode_changed = true;
10855 	}
10856 
10857 	/*
10858 	 * Add all primary and overlay planes on the CRTC to the state
10859 	 * whenever a plane is enabled to maintain correct z-ordering
10860 	 * and to enable fast surface updates.
10861 	 */
10862 	drm_for_each_crtc(crtc, dev) {
10863 		bool modified = false;
10864 
10865 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10866 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10867 				continue;
10868 
10869 			if (new_plane_state->crtc == crtc ||
10870 			    old_plane_state->crtc == crtc) {
10871 				modified = true;
10872 				break;
10873 			}
10874 		}
10875 
10876 		if (!modified)
10877 			continue;
10878 
10879 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10880 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10881 				continue;
10882 
10883 			new_plane_state =
10884 				drm_atomic_get_plane_state(state, plane);
10885 
10886 			if (IS_ERR(new_plane_state)) {
10887 				ret = PTR_ERR(new_plane_state);
10888 				goto fail;
10889 			}
10890 		}
10891 	}
10892 
10893 	/* Remove exiting planes if they are modified */
10894 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10895 		ret = dm_update_plane_state(dc, state, plane,
10896 					    old_plane_state,
10897 					    new_plane_state,
10898 					    false,
10899 					    &lock_and_validation_needed);
10900 		if (ret)
10901 			goto fail;
10902 	}
10903 
10904 	/* Disable all crtcs which require disable */
10905 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10906 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10907 					   old_crtc_state,
10908 					   new_crtc_state,
10909 					   false,
10910 					   &lock_and_validation_needed);
10911 		if (ret)
10912 			goto fail;
10913 	}
10914 
10915 	/* Enable all crtcs which require enable */
10916 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10917 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10918 					   old_crtc_state,
10919 					   new_crtc_state,
10920 					   true,
10921 					   &lock_and_validation_needed);
10922 		if (ret)
10923 			goto fail;
10924 	}
10925 
10926 	/* Add new/modified planes */
10927 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10928 		ret = dm_update_plane_state(dc, state, plane,
10929 					    old_plane_state,
10930 					    new_plane_state,
10931 					    true,
10932 					    &lock_and_validation_needed);
10933 		if (ret)
10934 			goto fail;
10935 	}
10936 
10937 	/* Run this here since we want to validate the streams we created */
10938 	ret = drm_atomic_helper_check_planes(dev, state);
10939 	if (ret)
10940 		goto fail;
10941 
10942 	/* Check cursor planes scaling */
10943 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10944 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10945 		if (ret)
10946 			goto fail;
10947 	}
10948 
10949 	if (state->legacy_cursor_update) {
10950 		/*
10951 		 * This is a fast cursor update coming from the plane update
10952 		 * helper, check if it can be done asynchronously for better
10953 		 * performance.
10954 		 */
10955 		state->async_update =
10956 			!drm_atomic_helper_async_check(dev, state);
10957 
10958 		/*
10959 		 * Skip the remaining global validation if this is an async
10960 		 * update. Cursor updates can be done without affecting
10961 		 * state or bandwidth calcs and this avoids the performance
10962 		 * penalty of locking the private state object and
10963 		 * allocating a new dc_state.
10964 		 */
10965 		if (state->async_update)
10966 			return 0;
10967 	}
10968 
10969 	/* Check scaling and underscan changes*/
10970 	/* TODO Removed scaling changes validation due to inability to commit
10971 	 * new stream into context w\o causing full reset. Need to
10972 	 * decide how to handle.
10973 	 */
10974 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10975 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10976 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10977 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10978 
10979 		/* Skip any modesets/resets */
10980 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10981 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10982 			continue;
10983 
10984 		/* Skip any thing not scale or underscan changes */
10985 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10986 			continue;
10987 
10988 		lock_and_validation_needed = true;
10989 	}
10990 
10991 #if defined(CONFIG_DRM_AMD_DC_DCN)
10992 	/* set the slot info for each mst_state based on the link encoding format */
10993 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10994 		struct amdgpu_dm_connector *aconnector;
10995 		struct drm_connector *connector;
10996 		struct drm_connector_list_iter iter;
10997 		u8 link_coding_cap;
10998 
10999 		if (!mgr->mst_state )
11000 			continue;
11001 
11002 		drm_connector_list_iter_begin(dev, &iter);
11003 		drm_for_each_connector_iter(connector, &iter) {
11004 			int id = connector->index;
11005 
11006 			if (id == mst_state->mgr->conn_base_id) {
11007 				aconnector = to_amdgpu_dm_connector(connector);
11008 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11009 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11010 
11011 				break;
11012 			}
11013 		}
11014 		drm_connector_list_iter_end(&iter);
11015 
11016 	}
11017 #endif
11018 	/**
11019 	 * Streams and planes are reset when there are changes that affect
11020 	 * bandwidth. Anything that affects bandwidth needs to go through
11021 	 * DC global validation to ensure that the configuration can be applied
11022 	 * to hardware.
11023 	 *
11024 	 * We have to currently stall out here in atomic_check for outstanding
11025 	 * commits to finish in this case because our IRQ handlers reference
11026 	 * DRM state directly - we can end up disabling interrupts too early
11027 	 * if we don't.
11028 	 *
11029 	 * TODO: Remove this stall and drop DM state private objects.
11030 	 */
11031 	if (lock_and_validation_needed) {
11032 		ret = dm_atomic_get_state(state, &dm_state);
11033 		if (ret)
11034 			goto fail;
11035 
11036 		ret = do_aquire_global_lock(dev, state);
11037 		if (ret)
11038 			goto fail;
11039 
11040 #if defined(CONFIG_DRM_AMD_DC_DCN)
11041 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
11042 			goto fail;
11043 
11044 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11045 		if (ret)
11046 			goto fail;
11047 #endif
11048 
11049 		/*
11050 		 * Perform validation of MST topology in the state:
11051 		 * We need to perform MST atomic check before calling
11052 		 * dc_validate_global_state(), or there is a chance
11053 		 * to get stuck in an infinite loop and hang eventually.
11054 		 */
11055 		ret = drm_dp_mst_atomic_check(state);
11056 		if (ret)
11057 			goto fail;
11058 		status = dc_validate_global_state(dc, dm_state->context, false);
11059 		if (status != DC_OK) {
11060 			drm_dbg_atomic(dev,
11061 				       "DC global validation failure: %s (%d)",
11062 				       dc_status_to_str(status), status);
11063 			ret = -EINVAL;
11064 			goto fail;
11065 		}
11066 	} else {
11067 		/*
11068 		 * The commit is a fast update. Fast updates shouldn't change
11069 		 * the DC context, affect global validation, and can have their
11070 		 * commit work done in parallel with other commits not touching
11071 		 * the same resource. If we have a new DC context as part of
11072 		 * the DM atomic state from validation we need to free it and
11073 		 * retain the existing one instead.
11074 		 *
11075 		 * Furthermore, since the DM atomic state only contains the DC
11076 		 * context and can safely be annulled, we can free the state
11077 		 * and clear the associated private object now to free
11078 		 * some memory and avoid a possible use-after-free later.
11079 		 */
11080 
11081 		for (i = 0; i < state->num_private_objs; i++) {
11082 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11083 
11084 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11085 				int j = state->num_private_objs-1;
11086 
11087 				dm_atomic_destroy_state(obj,
11088 						state->private_objs[i].state);
11089 
11090 				/* If i is not at the end of the array then the
11091 				 * last element needs to be moved to where i was
11092 				 * before the array can safely be truncated.
11093 				 */
11094 				if (i != j)
11095 					state->private_objs[i] =
11096 						state->private_objs[j];
11097 
11098 				state->private_objs[j].ptr = NULL;
11099 				state->private_objs[j].state = NULL;
11100 				state->private_objs[j].old_state = NULL;
11101 				state->private_objs[j].new_state = NULL;
11102 
11103 				state->num_private_objs = j;
11104 				break;
11105 			}
11106 		}
11107 	}
11108 
11109 	/* Store the overall update type for use later in atomic check. */
11110 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11111 		struct dm_crtc_state *dm_new_crtc_state =
11112 			to_dm_crtc_state(new_crtc_state);
11113 
11114 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11115 							 UPDATE_TYPE_FULL :
11116 							 UPDATE_TYPE_FAST;
11117 	}
11118 
11119 	/* Must be success */
11120 	WARN_ON(ret);
11121 
11122 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11123 
11124 	return ret;
11125 
11126 fail:
11127 	if (ret == -EDEADLK)
11128 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11129 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11130 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11131 	else
11132 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11133 
11134 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11135 
11136 	return ret;
11137 }
11138 
11139 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11140 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11141 {
11142 	uint8_t dpcd_data;
11143 	bool capable = false;
11144 
11145 	if (amdgpu_dm_connector->dc_link &&
11146 		dm_helpers_dp_read_dpcd(
11147 				NULL,
11148 				amdgpu_dm_connector->dc_link,
11149 				DP_DOWN_STREAM_PORT_COUNT,
11150 				&dpcd_data,
11151 				sizeof(dpcd_data))) {
11152 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11153 	}
11154 
11155 	return capable;
11156 }
11157 
11158 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11159 		unsigned int offset,
11160 		unsigned int total_length,
11161 		uint8_t *data,
11162 		unsigned int length,
11163 		struct amdgpu_hdmi_vsdb_info *vsdb)
11164 {
11165 	bool res;
11166 	union dmub_rb_cmd cmd;
11167 	struct dmub_cmd_send_edid_cea *input;
11168 	struct dmub_cmd_edid_cea_output *output;
11169 
11170 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11171 		return false;
11172 
11173 	memset(&cmd, 0, sizeof(cmd));
11174 
11175 	input = &cmd.edid_cea.data.input;
11176 
11177 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11178 	cmd.edid_cea.header.sub_type = 0;
11179 	cmd.edid_cea.header.payload_bytes =
11180 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11181 	input->offset = offset;
11182 	input->length = length;
11183 	input->total_length = total_length;
11184 	memcpy(input->payload, data, length);
11185 
11186 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11187 	if (!res) {
11188 		DRM_ERROR("EDID CEA parser failed\n");
11189 		return false;
11190 	}
11191 
11192 	output = &cmd.edid_cea.data.output;
11193 
11194 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11195 		if (!output->ack.success) {
11196 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11197 					output->ack.offset);
11198 		}
11199 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11200 		if (!output->amd_vsdb.vsdb_found)
11201 			return false;
11202 
11203 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11204 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11205 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11206 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11207 	} else {
11208 		DRM_WARN("Unknown EDID CEA parser results\n");
11209 		return false;
11210 	}
11211 
11212 	return true;
11213 }
11214 
11215 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11216 		uint8_t *edid_ext, int len,
11217 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11218 {
11219 	int i;
11220 
11221 	/* send extension block to DMCU for parsing */
11222 	for (i = 0; i < len; i += 8) {
11223 		bool res;
11224 		int offset;
11225 
11226 		/* send 8 bytes a time */
11227 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11228 			return false;
11229 
11230 		if (i+8 == len) {
11231 			/* EDID block sent completed, expect result */
11232 			int version, min_rate, max_rate;
11233 
11234 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11235 			if (res) {
11236 				/* amd vsdb found */
11237 				vsdb_info->freesync_supported = 1;
11238 				vsdb_info->amd_vsdb_version = version;
11239 				vsdb_info->min_refresh_rate_hz = min_rate;
11240 				vsdb_info->max_refresh_rate_hz = max_rate;
11241 				return true;
11242 			}
11243 			/* not amd vsdb */
11244 			return false;
11245 		}
11246 
11247 		/* check for ack*/
11248 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11249 		if (!res)
11250 			return false;
11251 	}
11252 
11253 	return false;
11254 }
11255 
11256 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11257 		uint8_t *edid_ext, int len,
11258 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11259 {
11260 	int i;
11261 
11262 	/* send extension block to DMCU for parsing */
11263 	for (i = 0; i < len; i += 8) {
11264 		/* send 8 bytes a time */
11265 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11266 			return false;
11267 	}
11268 
11269 	return vsdb_info->freesync_supported;
11270 }
11271 
11272 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11273 		uint8_t *edid_ext, int len,
11274 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11275 {
11276 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11277 
11278 	if (adev->dm.dmub_srv)
11279 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11280 	else
11281 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11282 }
11283 
11284 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11285 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11286 {
11287 	uint8_t *edid_ext = NULL;
11288 	int i;
11289 	bool valid_vsdb_found = false;
11290 
11291 	/*----- drm_find_cea_extension() -----*/
11292 	/* No EDID or EDID extensions */
11293 	if (edid == NULL || edid->extensions == 0)
11294 		return -ENODEV;
11295 
11296 	/* Find CEA extension */
11297 	for (i = 0; i < edid->extensions; i++) {
11298 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11299 		if (edid_ext[0] == CEA_EXT)
11300 			break;
11301 	}
11302 
11303 	if (i == edid->extensions)
11304 		return -ENODEV;
11305 
11306 	/*----- cea_db_offsets() -----*/
11307 	if (edid_ext[0] != CEA_EXT)
11308 		return -ENODEV;
11309 
11310 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11311 
11312 	return valid_vsdb_found ? i : -ENODEV;
11313 }
11314 
11315 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11316 					struct edid *edid)
11317 {
11318 	int i = 0;
11319 	struct detailed_timing *timing;
11320 	struct detailed_non_pixel *data;
11321 	struct detailed_data_monitor_range *range;
11322 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11323 			to_amdgpu_dm_connector(connector);
11324 	struct dm_connector_state *dm_con_state = NULL;
11325 	struct dc_sink *sink;
11326 
11327 	struct drm_device *dev = connector->dev;
11328 	struct amdgpu_device *adev = drm_to_adev(dev);
11329 	bool freesync_capable = false;
11330 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11331 
11332 	if (!connector->state) {
11333 		DRM_ERROR("%s - Connector has no state", __func__);
11334 		goto update;
11335 	}
11336 
11337 	sink = amdgpu_dm_connector->dc_sink ?
11338 		amdgpu_dm_connector->dc_sink :
11339 		amdgpu_dm_connector->dc_em_sink;
11340 
11341 	if (!edid || !sink) {
11342 		dm_con_state = to_dm_connector_state(connector->state);
11343 
11344 		amdgpu_dm_connector->min_vfreq = 0;
11345 		amdgpu_dm_connector->max_vfreq = 0;
11346 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11347 		connector->display_info.monitor_range.min_vfreq = 0;
11348 		connector->display_info.monitor_range.max_vfreq = 0;
11349 		freesync_capable = false;
11350 
11351 		goto update;
11352 	}
11353 
11354 	dm_con_state = to_dm_connector_state(connector->state);
11355 
11356 	if (!adev->dm.freesync_module)
11357 		goto update;
11358 
11359 
11360 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11361 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11362 		bool edid_check_required = false;
11363 
11364 		if (edid) {
11365 			edid_check_required = is_dp_capable_without_timing_msa(
11366 						adev->dm.dc,
11367 						amdgpu_dm_connector);
11368 		}
11369 
11370 		if (edid_check_required == true && (edid->version > 1 ||
11371 		   (edid->version == 1 && edid->revision > 1))) {
11372 			for (i = 0; i < 4; i++) {
11373 
11374 				timing	= &edid->detailed_timings[i];
11375 				data	= &timing->data.other_data;
11376 				range	= &data->data.range;
11377 				/*
11378 				 * Check if monitor has continuous frequency mode
11379 				 */
11380 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11381 					continue;
11382 				/*
11383 				 * Check for flag range limits only. If flag == 1 then
11384 				 * no additional timing information provided.
11385 				 * Default GTF, GTF Secondary curve and CVT are not
11386 				 * supported
11387 				 */
11388 				if (range->flags != 1)
11389 					continue;
11390 
11391 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11392 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11393 				amdgpu_dm_connector->pixel_clock_mhz =
11394 					range->pixel_clock_mhz * 10;
11395 
11396 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11397 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11398 
11399 				break;
11400 			}
11401 
11402 			if (amdgpu_dm_connector->max_vfreq -
11403 			    amdgpu_dm_connector->min_vfreq > 10) {
11404 
11405 				freesync_capable = true;
11406 			}
11407 		}
11408 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11409 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11410 		if (i >= 0 && vsdb_info.freesync_supported) {
11411 			timing  = &edid->detailed_timings[i];
11412 			data    = &timing->data.other_data;
11413 
11414 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11415 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11416 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11417 				freesync_capable = true;
11418 
11419 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11420 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11421 		}
11422 	}
11423 
11424 update:
11425 	if (dm_con_state)
11426 		dm_con_state->freesync_capable = freesync_capable;
11427 
11428 	if (connector->vrr_capable_property)
11429 		drm_connector_set_vrr_capable_property(connector,
11430 						       freesync_capable);
11431 }
11432 
11433 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11434 {
11435 	struct amdgpu_device *adev = drm_to_adev(dev);
11436 	struct dc *dc = adev->dm.dc;
11437 	int i;
11438 
11439 	mutex_lock(&adev->dm.dc_lock);
11440 	if (dc->current_state) {
11441 		for (i = 0; i < dc->current_state->stream_count; ++i)
11442 			dc->current_state->streams[i]
11443 				->triggered_crtc_reset.enabled =
11444 				adev->dm.force_timing_sync;
11445 
11446 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11447 		dc_trigger_sync(dc, dc->current_state);
11448 	}
11449 	mutex_unlock(&adev->dm.dc_lock);
11450 }
11451 
11452 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11453 		       uint32_t value, const char *func_name)
11454 {
11455 #ifdef DM_CHECK_ADDR_0
11456 	if (address == 0) {
11457 		DC_ERR("invalid register write. address = 0");
11458 		return;
11459 	}
11460 #endif
11461 	cgs_write_register(ctx->cgs_device, address, value);
11462 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11463 }
11464 
11465 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11466 			  const char *func_name)
11467 {
11468 	uint32_t value;
11469 #ifdef DM_CHECK_ADDR_0
11470 	if (address == 0) {
11471 		DC_ERR("invalid register read; address = 0\n");
11472 		return 0;
11473 	}
11474 #endif
11475 
11476 	if (ctx->dmub_srv &&
11477 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11478 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11479 		ASSERT(false);
11480 		return 0;
11481 	}
11482 
11483 	value = cgs_read_register(ctx->cgs_device, address);
11484 
11485 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11486 
11487 	return value;
11488 }
11489 
11490 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11491 	uint8_t status_type, uint32_t *operation_result)
11492 {
11493 	struct amdgpu_device *adev = ctx->driver_context;
11494 	int return_status = -1;
11495 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11496 
11497 	if (is_cmd_aux) {
11498 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11499 			return_status = p_notify->aux_reply.length;
11500 			*operation_result = p_notify->result;
11501 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11502 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11503 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11504 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11505 		} else {
11506 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11507 		}
11508 	} else {
11509 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11510 			return_status = 0;
11511 			*operation_result = p_notify->sc_status;
11512 		} else {
11513 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11514 		}
11515 	}
11516 
11517 	return return_status;
11518 }
11519 
11520 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11521 	unsigned int link_index, void *cmd_payload, void *operation_result)
11522 {
11523 	struct amdgpu_device *adev = ctx->driver_context;
11524 	int ret = 0;
11525 
11526 	if (is_cmd_aux) {
11527 		dc_process_dmub_aux_transfer_async(ctx->dc,
11528 			link_index, (struct aux_payload *)cmd_payload);
11529 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11530 					(struct set_config_cmd_payload *)cmd_payload,
11531 					adev->dm.dmub_notify)) {
11532 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11533 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11534 					(uint32_t *)operation_result);
11535 	}
11536 
11537 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11538 	if (ret == 0) {
11539 		DRM_ERROR("wait_for_completion_timeout timeout!");
11540 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11541 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11542 				(uint32_t *)operation_result);
11543 	}
11544 
11545 	if (is_cmd_aux) {
11546 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11547 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11548 
11549 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11550 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11551 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11552 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11553 				       adev->dm.dmub_notify->aux_reply.length);
11554 			}
11555 		}
11556 	}
11557 
11558 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11559 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11560 			(uint32_t *)operation_result);
11561 }
11562