1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 
55 #include "amd_shared.h"
56 #include "amdgpu_dm_irq.h"
57 #include "dm_helpers.h"
58 #include "amdgpu_dm_mst_types.h"
59 #if defined(CONFIG_DEBUG_FS)
60 #include "amdgpu_dm_debugfs.h"
61 #endif
62 #include "amdgpu_dm_psr.h"
63 
64 #include "ivsrcid/ivsrcid_vislands30.h"
65 
66 #include "i2caux_interface.h"
67 #include <linux/module.h>
68 #include <linux/moduleparam.h>
69 #include <linux/types.h>
70 #include <linux/pm_runtime.h>
71 #include <linux/pci.h>
72 #include <linux/firmware.h>
73 #include <linux/component.h>
74 
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
84 
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87 
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
92 
93 #include "soc15_common.h"
94 #endif
95 
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99 
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 
117 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119 
120 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122 
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125 
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
128 
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
138 
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
143 
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 {
146 	switch (link->dpcd_caps.dongle_type) {
147 	case DISPLAY_DONGLE_NONE:
148 		return DRM_MODE_SUBCONNECTOR_Native;
149 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150 		return DRM_MODE_SUBCONNECTOR_VGA;
151 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
153 		return DRM_MODE_SUBCONNECTOR_DVID;
154 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156 		return DRM_MODE_SUBCONNECTOR_HDMIA;
157 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158 	default:
159 		return DRM_MODE_SUBCONNECTOR_Unknown;
160 	}
161 }
162 
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 {
165 	struct dc_link *link = aconnector->dc_link;
166 	struct drm_connector *connector = &aconnector->base;
167 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168 
169 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170 		return;
171 
172 	if (aconnector->dc_sink)
173 		subconnector = get_subconnector_type(link);
174 
175 	drm_object_property_set_value(&connector->base,
176 			connector->dev->mode_config.dp_subconnector_property,
177 			subconnector);
178 }
179 
180 /*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
184  *
185  * Returns 0 on success
186  */
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* removes and deallocates the drm structures, created by the above function */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190 
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192 				struct drm_plane *plane,
193 				unsigned long possible_crtcs,
194 				const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196 			       struct drm_plane *plane,
197 			       uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
200 				    uint32_t link_index,
201 				    struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203 				  struct amdgpu_encoder *aencoder,
204 				  uint32_t link_index);
205 
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207 
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209 
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211 				  struct drm_atomic_state *state);
212 
213 static void handle_cursor_update(struct drm_plane *plane,
214 				 struct drm_plane_state *old_plane_state);
215 
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
218 
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
220 static void handle_hpd_rx_irq(void *param);
221 
222 static bool
223 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
224 				 struct drm_crtc_state *new_crtc_state);
225 /*
226  * dm_vblank_get_counter
227  *
228  * @brief
229  * Get counter for number of vertical blanks
230  *
231  * @param
232  * struct amdgpu_device *adev - [in] desired amdgpu device
233  * int disp_idx - [in] which CRTC to get the counter from
234  *
235  * @return
236  * Counter for vertical blanks
237  */
238 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
239 {
240 	if (crtc >= adev->mode_info.num_crtc)
241 		return 0;
242 	else {
243 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
244 
245 		if (acrtc->dm_irq_params.stream == NULL) {
246 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
247 				  crtc);
248 			return 0;
249 		}
250 
251 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
252 	}
253 }
254 
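/**
 * dm_crtc_get_scanoutpos() - Get the current scanout position for a CRTC
 * @adev: amdgpu device
 * @crtc: CRTC index
 * @vbl: returns vblank start (low 16 bits) and vblank end (high 16 bits)
 * @position: returns vertical (low 16 bits) and horizontal (high 16 bits) position
 *
 * Queries DC for the scanout position of the stream bound to @crtc and packs
 * the result into the register-style format expected by the base driver.
 */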
255 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
256 				  u32 *vbl, u32 *position)
257 {
258 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
259 
260 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
261 		return -EINVAL;
262 	else {
263 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
264 
		if (acrtc->dm_irq_params.stream == NULL) {
266 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
267 				  crtc);
268 			return 0;
269 		}
270 
271 		/*
272 		 * TODO rework base driver to use values directly.
273 		 * for now parse it back into reg-format
274 		 */
275 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
276 					 &v_blank_start,
277 					 &v_blank_end,
278 					 &h_position,
279 					 &v_position);
280 
281 		*position = v_position | (h_position << 16);
282 		*vbl = v_blank_start | (v_blank_end << 16);
283 	}
284 
285 	return 0;
286 }
287 
288 static bool dm_is_idle(void *handle)
289 {
290 	/* XXX todo */
291 	return true;
292 }
293 
294 static int dm_wait_for_idle(void *handle)
295 {
296 	/* XXX todo */
297 	return 0;
298 }
299 
300 static bool dm_check_soft_reset(void *handle)
301 {
302 	return false;
303 }
304 
305 static int dm_soft_reset(void *handle)
306 {
307 	/* XXX todo */
308 	return 0;
309 }
310 
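/*
 * Find the amdgpu_crtc whose OTG instance matches otg_inst. Falls back to the
 * first CRTC (with a warning) when otg_inst is -1, and returns NULL when no
 * CRTC matches.
 */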
311 static struct amdgpu_crtc *
312 get_crtc_by_otg_inst(struct amdgpu_device *adev,
313 		     int otg_inst)
314 {
315 	struct drm_device *dev = adev_to_drm(adev);
316 	struct drm_crtc *crtc;
317 	struct amdgpu_crtc *amdgpu_crtc;
318 
319 	if (WARN_ON(otg_inst == -1))
320 		return adev->mode_info.crtcs[0];
321 
322 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
323 		amdgpu_crtc = to_amdgpu_crtc(crtc);
324 
325 		if (amdgpu_crtc->otg_inst == otg_inst)
326 			return amdgpu_crtc;
327 	}
328 
329 	return NULL;
330 }
331 
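/*
 * VRR is considered active when the freesync state is either "active variable"
 * or "active fixed"; the _irq variant reads the IRQ-safe per-CRTC parameters.
 */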
332 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
333 {
334 	return acrtc->dm_irq_params.freesync_config.state ==
335 		       VRR_STATE_ACTIVE_VARIABLE ||
336 	       acrtc->dm_irq_params.freesync_config.state ==
337 		       VRR_STATE_ACTIVE_FIXED;
338 }
339 
340 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
341 {
342 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
343 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
344 }
345 
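/*
 * DC needs its stream timing adjusted when entering fixed-rate VRR or whenever
 * the VRR active state differs between the old and new CRTC state.
 */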
346 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
347 					      struct dm_crtc_state *new_state)
348 {
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
350 		return true;
351 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
352 		return true;
353 	else
354 		return false;
355 }
356 
357 /**
358  * dm_pflip_high_irq() - Handle pageflip interrupt
359  * @interrupt_params: ignored
360  *
361  * Handles the pageflip interrupt by notifying all interested parties
362  * that the pageflip has been completed.
363  */
364 static void dm_pflip_high_irq(void *interrupt_params)
365 {
366 	struct amdgpu_crtc *amdgpu_crtc;
367 	struct common_irq_params *irq_params = interrupt_params;
368 	struct amdgpu_device *adev = irq_params->adev;
369 	unsigned long flags;
370 	struct drm_pending_vblank_event *e;
371 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
372 	bool vrr_active;
373 
374 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
375 
376 	/* IRQ could occur when in initial stage */
377 	/* TODO work and BO cleanup */
378 	if (amdgpu_crtc == NULL) {
379 		DC_LOG_PFLIP("CRTC is null, returning.\n");
380 		return;
381 	}
382 
383 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
384 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
387 						 amdgpu_crtc->pflip_status,
388 						 AMDGPU_FLIP_SUBMITTED,
389 						 amdgpu_crtc->crtc_id,
390 						 amdgpu_crtc);
391 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
392 		return;
393 	}
394 
395 	/* page flip completed. */
396 	e = amdgpu_crtc->event;
397 	amdgpu_crtc->event = NULL;
398 
399 	WARN_ON(!e);
400 
401 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
402 
403 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
404 	if (!vrr_active ||
405 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
406 				      &v_blank_end, &hpos, &vpos) ||
407 	    (vpos < v_blank_start)) {
408 		/* Update to correct count and vblank timestamp if racing with
409 		 * vblank irq. This also updates to the correct vblank timestamp
410 		 * even in VRR mode, as scanout is past the front-porch atm.
411 		 */
412 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
413 
414 		/* Wake up userspace by sending the pageflip event with proper
415 		 * count and timestamp of vblank of flip completion.
416 		 */
417 		if (e) {
418 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
419 
420 			/* Event sent, so done with vblank for this flip */
421 			drm_crtc_vblank_put(&amdgpu_crtc->base);
422 		}
423 	} else if (e) {
424 		/* VRR active and inside front-porch: vblank count and
425 		 * timestamp for pageflip event will only be up to date after
426 		 * drm_crtc_handle_vblank() has been executed from late vblank
427 		 * irq handler after start of back-porch (vline 0). We queue the
428 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
429 		 * updated timestamp and count, once it runs after us.
430 		 *
431 		 * We need to open-code this instead of using the helper
432 		 * drm_crtc_arm_vblank_event(), as that helper would
433 		 * call drm_crtc_accurate_vblank_count(), which we must
434 		 * not call in VRR mode while we are in front-porch!
435 		 */
436 
437 		/* sequence will be replaced by real count during send-out. */
438 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
439 		e->pipe = amdgpu_crtc->crtc_id;
440 
441 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
442 		e = NULL;
443 	}
444 
445 	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this vblank
447 	 * of pageflip completion, so last_flip_vblank is the forbidden count
448 	 * for queueing new pageflips if vsync + VRR is enabled.
449 	 */
450 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
451 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
452 
453 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
454 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
455 
456 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
457 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
458 		     vrr_active, (int) !e);
459 }
460 
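/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, in VRR mode, performs core vblank
 * handling after the end of the front-porch, including BTR processing on
 * pre-DCE12 ASICs.
 */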
461 static void dm_vupdate_high_irq(void *interrupt_params)
462 {
463 	struct common_irq_params *irq_params = interrupt_params;
464 	struct amdgpu_device *adev = irq_params->adev;
465 	struct amdgpu_crtc *acrtc;
466 	struct drm_device *drm_dev;
467 	struct drm_vblank_crtc *vblank;
468 	ktime_t frame_duration_ns, previous_timestamp;
469 	unsigned long flags;
470 	int vrr_active;
471 
472 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
473 
474 	if (acrtc) {
475 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
476 		drm_dev = acrtc->base.dev;
477 		vblank = &drm_dev->vblank[acrtc->base.index];
478 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
479 		frame_duration_ns = vblank->time - previous_timestamp;
480 
481 		if (frame_duration_ns > 0) {
482 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
483 						frame_duration_ns,
484 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
485 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
486 		}
487 
488 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
489 			      acrtc->crtc_id,
490 			      vrr_active);
491 
		/* Core vblank handling is done here after the end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * only when done after the front-porch. This will also deliver
495 		 * page-flip completion events that have been queued to us
496 		 * if a pageflip happened inside front-porch.
497 		 */
498 		if (vrr_active) {
499 			drm_crtc_handle_vblank(&acrtc->base);
500 
501 			/* BTR processing for pre-DCE12 ASICs */
502 			if (acrtc->dm_irq_params.stream &&
503 			    adev->family < AMDGPU_FAMILY_AI) {
504 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
505 				mod_freesync_handle_v_update(
506 				    adev->dm.freesync_module,
507 				    acrtc->dm_irq_params.stream,
508 				    &acrtc->dm_irq_params.vrr_params);
509 
510 				dc_stream_adjust_vmin_vmax(
511 				    adev->dm.dc,
512 				    acrtc->dm_irq_params.stream,
513 				    &acrtc->dm_irq_params.vrr_params.adjust);
514 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
515 			}
516 		}
517 	}
518 }
519 
520 /**
521  * dm_crtc_high_irq() - Handles CRTC interrupt
522  * @interrupt_params: used for determining the CRTC instance
523  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
525  * event handler.
526  */
527 static void dm_crtc_high_irq(void *interrupt_params)
528 {
529 	struct common_irq_params *irq_params = interrupt_params;
530 	struct amdgpu_device *adev = irq_params->adev;
531 	struct amdgpu_crtc *acrtc;
532 	unsigned long flags;
533 	int vrr_active;
534 
535 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
536 	if (!acrtc)
537 		return;
538 
539 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
540 
541 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
542 		      vrr_active, acrtc->dm_irq_params.active_planes);
543 
544 	/**
545 	 * Core vblank handling at start of front-porch is only possible
546 	 * in non-vrr mode, as only there vblank timestamping will give
547 	 * valid results while done in front-porch. Otherwise defer it
548 	 * to dm_vupdate_high_irq after end of front-porch.
549 	 */
550 	if (!vrr_active)
551 		drm_crtc_handle_vblank(&acrtc->base);
552 
553 	/**
554 	 * Following stuff must happen at start of vblank, for crc
555 	 * computation and below-the-range btr support in vrr mode.
556 	 */
557 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
558 
559 	/* BTR updates need to happen before VUPDATE on Vega and above. */
560 	if (adev->family < AMDGPU_FAMILY_AI)
561 		return;
562 
563 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
564 
565 	if (acrtc->dm_irq_params.stream &&
566 	    acrtc->dm_irq_params.vrr_params.supported &&
567 	    acrtc->dm_irq_params.freesync_config.state ==
568 		    VRR_STATE_ACTIVE_VARIABLE) {
569 		mod_freesync_handle_v_update(adev->dm.freesync_module,
570 					     acrtc->dm_irq_params.stream,
571 					     &acrtc->dm_irq_params.vrr_params);
572 
573 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
574 					   &acrtc->dm_irq_params.vrr_params.adjust);
575 	}
576 
577 	/*
578 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
579 	 * In that case, pageflip completion interrupts won't fire and pageflip
580 	 * completion events won't get delivered. Prevent this by sending
581 	 * pending pageflip events from here if a flip is still pending.
582 	 *
583 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
584 	 * avoid race conditions between flip programming and completion,
585 	 * which could cause too early flip completion events.
586 	 */
587 	if (adev->family >= AMDGPU_FAMILY_RV &&
588 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
589 	    acrtc->dm_irq_params.active_planes == 0) {
590 		if (acrtc->event) {
591 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
592 			acrtc->event = NULL;
593 			drm_crtc_vblank_put(&acrtc->base);
594 		}
595 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
596 	}
597 
598 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
599 }
600 
601 #if defined(CONFIG_DRM_AMD_DC_DCN)
602 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
603 /**
604  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
605  * DCN generation ASICs
606  * @interrupt_params: interrupt parameters
607  *
608  * Used to set crc window/read out crc value at vertical line 0 position
609  */
610 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
611 {
612 	struct common_irq_params *irq_params = interrupt_params;
613 	struct amdgpu_device *adev = irq_params->adev;
614 	struct amdgpu_crtc *acrtc;
615 
616 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
617 
618 	if (!acrtc)
619 		return;
620 
621 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
622 }
623 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
624 
625 /**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
627  * @adev: amdgpu_device pointer
628  * @notify: dmub notification structure
629  *
630  * Dmub AUX or SET_CONFIG command completion processing callback
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread and also signals the event to wake up the thread.
633  */
634 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
635 {
636 	if (adev->dm.dmub_notify)
637 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
638 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
639 		complete(&adev->dm.dmub_aux_transfer_done);
640 }
641 
642 /**
643  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
644  * @adev: amdgpu_device pointer
645  * @notify: dmub notification structure
646  *
 * Dmub Hpd interrupt processing callback. Gets the display index through the
 * link index and calls a helper to do the processing.
649  */
650 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
651 {
652 	struct amdgpu_dm_connector *aconnector;
653 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
654 	struct drm_connector *connector;
655 	struct drm_connector_list_iter iter;
656 	struct dc_link *link;
657 	uint8_t link_index = 0;
658 	struct drm_device *dev = adev->dm.ddev;
659 
660 	if (adev == NULL)
661 		return;
662 
663 	if (notify == NULL) {
664 		DRM_ERROR("DMUB HPD callback notification was NULL");
665 		return;
666 	}
667 
	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
670 		return;
671 	}
672 
673 	link_index = notify->link_index;
674 	link = adev->dm.dc->links[link_index];
675 
676 	drm_connector_list_iter_begin(dev, &iter);
677 	drm_for_each_connector_iter(connector, &iter) {
678 		aconnector = to_amdgpu_dm_connector(connector);
679 		if (link && aconnector->dc_link == link) {
680 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
681 			hpd_aconnector = aconnector;
682 			break;
683 		}
684 	}
685 	drm_connector_list_iter_end(&iter);
686 
687 	if (hpd_aconnector) {
688 		if (notify->type == DMUB_NOTIFICATION_HPD)
689 			handle_hpd_irq_helper(hpd_aconnector);
690 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
691 			handle_hpd_rx_irq(hpd_aconnector);
692 	}
693 }
694 
695 /**
696  * register_dmub_notify_callback - Sets callback for DMUB notify
697  * @adev: amdgpu_device pointer
698  * @type: Type of dmub notification
699  * @callback: Dmub interrupt callback function
700  * @dmub_int_thread_offload: offload indicator
701  *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator of whether the callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if the callback is NULL or the
 * notification type is out of range
706  */
707 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
708 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
709 {
	if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
		return false;

	adev->dm.dmub_callback[type] = callback;
	adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;

	return true;
717 }
718 
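/*
 * Worker that runs the registered DMUB notification callback for work items
 * offloaded by dm_dmub_outbox1_low_irq(), then frees the work item.
 */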
719 static void dm_handle_hpd_work(struct work_struct *work)
720 {
721 	struct dmub_hpd_work *dmub_hpd_wrk;
722 
723 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
724 
725 	if (!dmub_hpd_wrk->dmub_notify) {
726 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
727 		return;
728 	}
729 
730 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
731 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
732 		dmub_hpd_wrk->dmub_notify);
733 	}
734 
735 	kfree(dmub_hpd_wrk->dmub_notify);
736 	kfree(dmub_hpd_wrk);
737 
738 }
739 
740 #define DMUB_TRACE_MAX_READ 64
741 /**
742  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
743  * @interrupt_params: used for determining the Outbox instance
744  *
 * Handles the Outbox interrupt by processing pending DMUB notifications and
 * draining the DMUB trace buffer.
747  */
748 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
749 {
750 	struct dmub_notification notify;
751 	struct common_irq_params *irq_params = interrupt_params;
752 	struct amdgpu_device *adev = irq_params->adev;
753 	struct amdgpu_display_manager *dm = &adev->dm;
754 	struct dmcub_trace_buf_entry entry = { 0 };
755 	uint32_t count = 0;
756 	struct dmub_hpd_work *dmub_hpd_wrk;
757 	struct dc_link *plink = NULL;
758 
759 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
760 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
761 
762 		do {
763 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
765 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
766 				continue;
767 			}
768 			if (!dm->dmub_callback[notify.type]) {
769 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
770 				continue;
771 			}
			if (dm->dmub_thread_offload[notify.type]) {
773 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
774 				if (!dmub_hpd_wrk) {
775 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
776 					return;
777 				}
778 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
779 				if (!dmub_hpd_wrk->dmub_notify) {
780 					kfree(dmub_hpd_wrk);
781 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
782 					return;
783 				}
784 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
787 				dmub_hpd_wrk->adev = adev;
788 				if (notify.type == DMUB_NOTIFICATION_HPD) {
789 					plink = adev->dm.dc->links[notify.link_index];
790 					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
794 					}
795 				}
796 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
797 			} else {
798 				dm->dmub_callback[notify.type](adev, &notify);
799 			}
800 		} while (notify.pending_notification);
801 	}
802 
803 
	do {
		if (!dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry))
			break;

		trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
						 entry.param0, entry.param1);

		DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);

		count++;
	} while (count <= DMUB_TRACE_MAX_READ);
817 
818 	if (count > DMUB_TRACE_MAX_READ)
819 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
820 }
821 #endif /* CONFIG_DRM_AMD_DC_DCN */
822 
823 static int dm_set_clockgating_state(void *handle,
824 		  enum amd_clockgating_state state)
825 {
826 	return 0;
827 }
828 
829 static int dm_set_powergating_state(void *handle,
830 		  enum amd_powergating_state state)
831 {
832 	return 0;
833 }
834 
835 /* Prototypes of private functions */
static int dm_early_init(void *handle);
837 
/* Allocate memory for FBC compressed data */
839 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
840 {
841 	struct drm_device *dev = connector->dev;
842 	struct amdgpu_device *adev = drm_to_adev(dev);
843 	struct dm_compressor_info *compressor = &adev->dm.compressor;
844 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
845 	struct drm_display_mode *mode;
846 	unsigned long max_size = 0;
847 
848 	if (adev->dm.dc->fbc_compressor == NULL)
849 		return;
850 
851 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
852 		return;
853 
854 	if (compressor->bo_ptr)
855 		return;
856 
857 
858 	list_for_each_entry(mode, &connector->modes, head) {
859 		if (max_size < mode->htotal * mode->vtotal)
860 			max_size = mode->htotal * mode->vtotal;
861 	}
862 
863 	if (max_size) {
864 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
865 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
866 			    &compressor->gpu_addr, &compressor->cpu_addr);
867 
		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
878 
879 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
880 					  int pipe, bool *enabled,
881 					  unsigned char *buf, int max_bytes)
882 {
883 	struct drm_device *dev = dev_get_drvdata(kdev);
884 	struct amdgpu_device *adev = drm_to_adev(dev);
885 	struct drm_connector *connector;
886 	struct drm_connector_list_iter conn_iter;
887 	struct amdgpu_dm_connector *aconnector;
888 	int ret = 0;
889 
890 	*enabled = false;
891 
892 	mutex_lock(&adev->dm.audio_lock);
893 
894 	drm_connector_list_iter_begin(dev, &conn_iter);
895 	drm_for_each_connector_iter(connector, &conn_iter) {
896 		aconnector = to_amdgpu_dm_connector(connector);
897 		if (aconnector->audio_inst != port)
898 			continue;
899 
900 		*enabled = true;
901 		ret = drm_eld_size(connector->eld);
902 		memcpy(buf, connector->eld, min(max_bytes, ret));
903 
904 		break;
905 	}
906 	drm_connector_list_iter_end(&conn_iter);
907 
908 	mutex_unlock(&adev->dm.audio_lock);
909 
910 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
911 
912 	return ret;
913 }
914 
915 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
916 	.get_eld = amdgpu_dm_audio_component_get_eld,
917 };
918 
919 static int amdgpu_dm_audio_component_bind(struct device *kdev,
920 				       struct device *hda_kdev, void *data)
921 {
922 	struct drm_device *dev = dev_get_drvdata(kdev);
923 	struct amdgpu_device *adev = drm_to_adev(dev);
924 	struct drm_audio_component *acomp = data;
925 
926 	acomp->ops = &amdgpu_dm_audio_component_ops;
927 	acomp->dev = kdev;
928 	adev->dm.audio_component = acomp;
929 
930 	return 0;
931 }
932 
933 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
934 					  struct device *hda_kdev, void *data)
935 {
936 	struct drm_device *dev = dev_get_drvdata(kdev);
937 	struct amdgpu_device *adev = drm_to_adev(dev);
938 	struct drm_audio_component *acomp = data;
939 
940 	acomp->ops = NULL;
941 	acomp->dev = NULL;
942 	adev->dm.audio_component = NULL;
943 }
944 
945 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
946 	.bind	= amdgpu_dm_audio_component_bind,
947 	.unbind	= amdgpu_dm_audio_component_unbind,
948 };
949 
950 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
951 {
952 	int i, ret;
953 
954 	if (!amdgpu_audio)
955 		return 0;
956 
957 	adev->mode_info.audio.enabled = true;
958 
959 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
960 
961 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
962 		adev->mode_info.audio.pin[i].channels = -1;
963 		adev->mode_info.audio.pin[i].rate = -1;
964 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
965 		adev->mode_info.audio.pin[i].status_bits = 0;
966 		adev->mode_info.audio.pin[i].category_code = 0;
967 		adev->mode_info.audio.pin[i].connected = false;
968 		adev->mode_info.audio.pin[i].id =
969 			adev->dm.dc->res_pool->audios[i]->inst;
970 		adev->mode_info.audio.pin[i].offset = 0;
971 	}
972 
973 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
974 	if (ret < 0)
975 		return ret;
976 
977 	adev->dm.audio_registered = true;
978 
979 	return 0;
980 }
981 
982 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
983 {
984 	if (!amdgpu_audio)
985 		return;
986 
987 	if (!adev->mode_info.audio.enabled)
988 		return;
989 
990 	if (adev->dm.audio_registered) {
991 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
992 		adev->dm.audio_registered = false;
993 	}
994 
995 	/* TODO: Disable audio? */
996 
997 	adev->mode_info.audio.enabled = false;
998 }
999 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1001 {
1002 	struct drm_audio_component *acomp = adev->dm.audio_component;
1003 
1004 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1005 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1006 
1007 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1008 						 pin, -1);
1009 	}
1010 }
1011 
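/**
 * dm_dmub_hw_init() - Initialize the DMUB hardware
 * @adev: amdgpu_device pointer
 *
 * Copies the DMUB firmware and VBIOS into the reserved framebuffer windows,
 * programs the hardware parameters and starts the DMUB service, then brings
 * up DMCU/ABM when they are present.
 *
 * Return: 0 on success (or when DMUB is not supported), negative errno on failure.
 */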
1012 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1013 {
1014 	const struct dmcub_firmware_header_v1_0 *hdr;
1015 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1016 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1017 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1018 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1019 	struct abm *abm = adev->dm.dc->res_pool->abm;
1020 	struct dmub_srv_hw_params hw_params;
1021 	enum dmub_status status;
1022 	const unsigned char *fw_inst_const, *fw_bss_data;
1023 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1024 	bool has_hw_support;
1025 	struct dc *dc = adev->dm.dc;
1026 
1027 	if (!dmub_srv)
1028 		/* DMUB isn't supported on the ASIC. */
1029 		return 0;
1030 
1031 	if (!fb_info) {
1032 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1033 		return -EINVAL;
1034 	}
1035 
1036 	if (!dmub_fw) {
1037 		/* Firmware required for DMUB support. */
1038 		DRM_ERROR("No firmware provided for DMUB.\n");
1039 		return -EINVAL;
1040 	}
1041 
1042 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1043 	if (status != DMUB_STATUS_OK) {
1044 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1045 		return -EINVAL;
1046 	}
1047 
1048 	if (!has_hw_support) {
1049 		DRM_INFO("DMUB unsupported on ASIC\n");
1050 		return 0;
1051 	}
1052 
1053 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1054 
1055 	fw_inst_const = dmub_fw->data +
1056 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1057 			PSP_HEADER_BYTES;
1058 
1059 	fw_bss_data = dmub_fw->data +
1060 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1061 		      le32_to_cpu(hdr->inst_const_bytes);
1062 
1063 	/* Copy firmware and bios info into FB memory. */
1064 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1065 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1066 
1067 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1068 
1069 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1070 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1071 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1072 	 * will be done by dm_dmub_hw_init
1073 	 */
1074 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1075 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1076 				fw_inst_const_size);
1077 	}
1078 
1079 	if (fw_bss_data_size)
1080 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1081 		       fw_bss_data, fw_bss_data_size);
1082 
1083 	/* Copy firmware bios info into FB memory. */
1084 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1085 	       adev->bios_size);
1086 
1087 	/* Reset regions that need to be reset. */
1088 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1090 
1091 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1092 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1093 
1094 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1095 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1096 
1097 	/* Initialize hardware. */
1098 	memset(&hw_params, 0, sizeof(hw_params));
1099 	hw_params.fb_base = adev->gmc.fb_start;
1100 	hw_params.fb_offset = adev->gmc.aper_base;
1101 
1102 	/* backdoor load firmware and trigger dmub running */
1103 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1104 		hw_params.load_inst_const = true;
1105 
1106 	if (dmcu)
1107 		hw_params.psp_version = dmcu->psp_version;
1108 
1109 	for (i = 0; i < fb_info->num_fb; ++i)
1110 		hw_params.fb[i] = &fb_info->fb[i];
1111 
1112 	switch (adev->asic_type) {
1113 	case CHIP_YELLOW_CARP:
1114 		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1115 			hw_params.dpia_supported = true;
1116 #if defined(CONFIG_DRM_AMD_DC_DCN)
1117 			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1118 #endif
1119 		}
1120 		break;
1121 	default:
1122 		break;
1123 	}
1124 
1125 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1126 	if (status != DMUB_STATUS_OK) {
1127 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1128 		return -EINVAL;
1129 	}
1130 
1131 	/* Wait for firmware load to finish. */
1132 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1133 	if (status != DMUB_STATUS_OK)
1134 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1135 
1136 	/* Init DMCU and ABM if available. */
1137 	if (dmcu && abm) {
1138 		dmcu->funcs->dmcu_init(dmcu);
1139 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1140 	}
1141 
1142 	if (!adev->dm.dc->ctx->dmub_srv)
1143 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1144 	if (!adev->dm.dc->ctx->dmub_srv) {
1145 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1146 		return -ENOMEM;
1147 	}
1148 
1149 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1150 		 adev->dm.dmcub_fw_version);
1151 
1152 	return 0;
1153 }
1154 
1155 #if defined(CONFIG_DRM_AMD_DC_DCN)
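/*
 * Build a dc_phy_addr_space_config from GMC state: system aperture bounds,
 * AGP window, framebuffer base/offset and the GART page table location, in
 * the formats DC expects.
 */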
1156 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1157 {
1158 	uint64_t pt_base;
1159 	uint32_t logical_addr_low;
1160 	uint32_t logical_addr_high;
1161 	uint32_t agp_base, agp_bot, agp_top;
1162 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1163 
1164 	memset(pa_config, 0, sizeof(*pa_config));
1165 
1166 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1167 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1168 
1169 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1170 		/*
1171 		 * Raven2 has a HW issue that it is unable to use the vram which
1172 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1173 		 * workaround that increase system aperture high address (add 1)
1174 		 * to get rid of the VM fault and hardware hang.
1175 		 */
1176 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1177 	else
1178 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1179 
1180 	agp_base = 0;
1181 	agp_bot = adev->gmc.agp_start >> 24;
1182 	agp_top = adev->gmc.agp_end >> 24;
1183 
1184 
1185 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1186 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1187 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1188 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1189 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1190 	page_table_base.low_part = lower_32_bits(pt_base);
1191 
1192 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1193 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1194 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1196 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1197 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1198 
1199 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1200 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1201 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1202 
1203 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1204 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1205 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1206 
	pa_config->is_hvm_enabled = 0;
1209 }
1210 #endif
1211 #if defined(CONFIG_DRM_AMD_DC_DCN)
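/*
 * Deferred work that tracks how many CRTCs have vblank interrupts enabled,
 * toggles DC idle optimizations (MALL) accordingly, and enables or disables
 * PSR based on the vblank requirements of the stream.
 */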
1212 static void vblank_control_worker(struct work_struct *work)
1213 {
1214 	struct vblank_control_work *vblank_work =
1215 		container_of(work, struct vblank_control_work, work);
1216 	struct amdgpu_display_manager *dm = vblank_work->dm;
1217 
1218 	mutex_lock(&dm->dc_lock);
1219 
1220 	if (vblank_work->enable)
1221 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1223 		dm->active_vblank_irq_count--;
1224 
1225 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1226 
1227 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1228 
1229 	/* Control PSR based on vblank requirements from OS */
1230 	if (vblank_work->stream && vblank_work->stream->link) {
1231 		if (vblank_work->enable) {
1232 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1233 				amdgpu_dm_psr_disable(vblank_work->stream);
1234 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1235 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1236 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1237 			amdgpu_dm_psr_enable(vblank_work->stream);
1238 		}
1239 	}
1240 
1241 	mutex_unlock(&dm->dc_lock);
1242 
1243 	dc_stream_release(vblank_work->stream);
1244 
1245 	kfree(vblank_work);
1246 }
1247 
1248 #endif
1249 
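/*
 * Deferred HPD RX IRQ handling: re-detects the sink and, when not in GPU
 * reset, services automated test requests or recovers from link loss while
 * holding the DC lock.
 */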
1250 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1251 {
1252 	struct hpd_rx_irq_offload_work *offload_work;
1253 	struct amdgpu_dm_connector *aconnector;
1254 	struct dc_link *dc_link;
1255 	struct amdgpu_device *adev;
1256 	enum dc_connection_type new_connection_type = dc_connection_none;
1257 	unsigned long flags;
1258 
1259 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1260 	aconnector = offload_work->offload_wq->aconnector;
1261 
1262 	if (!aconnector) {
1263 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1264 		goto skip;
1265 	}
1266 
1267 	adev = drm_to_adev(aconnector->base.dev);
1268 	dc_link = aconnector->dc_link;
1269 
1270 	mutex_lock(&aconnector->hpd_lock);
1271 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1272 		DRM_ERROR("KMS: Failed to detect connector\n");
1273 	mutex_unlock(&aconnector->hpd_lock);
1274 
1275 	if (new_connection_type == dc_connection_none)
1276 		goto skip;
1277 
1278 	if (amdgpu_in_reset(adev))
1279 		goto skip;
1280 
1281 	mutex_lock(&adev->dm.dc_lock);
1282 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1283 		dc_link_dp_handle_automated_test(dc_link);
1284 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1285 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1286 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1287 		dc_link_dp_handle_link_loss(dc_link);
1288 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1289 		offload_work->offload_wq->is_handling_link_loss = false;
1290 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1291 	}
1292 	mutex_unlock(&adev->dm.dc_lock);
1293 
1294 skip:
	kfree(offload_work);
1297 }
1298 
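/*
 * Allocate one single-threaded HPD RX IRQ offload workqueue per link so that
 * long-running RX IRQ handling can be deferred out of the interrupt path.
 */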
1299 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1300 {
1301 	int max_caps = dc->caps.max_links;
1302 	int i = 0;
1303 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1304 
1305 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1306 
1307 	if (!hpd_rx_offload_wq)
1308 		return NULL;
1309 
1310 
1311 	for (i = 0; i < max_caps; i++) {
1312 		hpd_rx_offload_wq[i].wq =
1313 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1314 
1315 		if (hpd_rx_offload_wq[i].wq == NULL) {
1316 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1317 			return NULL;
1318 		}
1319 
1320 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1321 	}
1322 
1323 	return hpd_rx_offload_wq;
1324 }
1325 
1326 struct amdgpu_stutter_quirk {
1327 	u16 chip_vendor;
1328 	u16 chip_device;
1329 	u16 subsys_vendor;
1330 	u16 subsys_device;
1331 	u8 revision;
1332 };
1333 
1334 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1335 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1336 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1337 	{ 0, 0, 0, 0, 0 },
1338 };
1339 
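/*
 * Check the PCI IDs and revision against the quirk list for boards on which
 * memory stutter mode must be disabled.
 */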
1340 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1341 {
1342 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1343 
1344 	while (p && p->chip_device != 0) {
1345 		if (pdev->vendor == p->chip_vendor &&
1346 		    pdev->device == p->chip_device &&
1347 		    pdev->subsystem_vendor == p->subsys_vendor &&
1348 		    pdev->subsystem_device == p->subsys_device &&
1349 		    pdev->revision == p->revision) {
1350 			return true;
1351 		}
1352 		++p;
1353 	}
1354 	return false;
1355 }
1356 
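/**
 * amdgpu_dm_init() - Initialize the display manager
 * @adev: amdgpu_device pointer
 *
 * Creates the DC instance from ASIC and BIOS information, initializes DMUB,
 * the freesync and color modules, optional HDCP and DMUB notification support,
 * and finally the DRM-facing display structures and vblank support.
 *
 * Return: 0 on success, -EINVAL on failure.
 */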
1357 static int amdgpu_dm_init(struct amdgpu_device *adev)
1358 {
1359 	struct dc_init_data init_data;
1360 #ifdef CONFIG_DRM_AMD_DC_HDCP
1361 	struct dc_callback_init init_params;
1362 #endif
1363 	int r;
1364 
1365 	adev->dm.ddev = adev_to_drm(adev);
1366 	adev->dm.adev = adev;
1367 
1368 	/* Zero all the fields */
1369 	memset(&init_data, 0, sizeof(init_data));
1370 #ifdef CONFIG_DRM_AMD_DC_HDCP
1371 	memset(&init_params, 0, sizeof(init_params));
1372 #endif
1373 
1374 	mutex_init(&adev->dm.dc_lock);
1375 	mutex_init(&adev->dm.audio_lock);
1376 #if defined(CONFIG_DRM_AMD_DC_DCN)
1377 	spin_lock_init(&adev->dm.vblank_lock);
1378 #endif
1379 
	if (amdgpu_dm_irq_init(adev)) {
1381 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1382 		goto error;
1383 	}
1384 
1385 	init_data.asic_id.chip_family = adev->family;
1386 
1387 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1388 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1389 	init_data.asic_id.chip_id = adev->pdev->device;
1390 
1391 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1392 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1393 	init_data.asic_id.atombios_base_address =
1394 		adev->mode_info.atom_context->bios;
1395 
1396 	init_data.driver = adev;
1397 
1398 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1399 
1400 	if (!adev->dm.cgs_device) {
1401 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1402 		goto error;
1403 	}
1404 
1405 	init_data.cgs_device = adev->dm.cgs_device;
1406 
1407 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1408 
1409 	switch (adev->asic_type) {
1410 	case CHIP_CARRIZO:
1411 	case CHIP_STONEY:
1412 		init_data.flags.gpu_vm_support = true;
1413 		break;
1414 	default:
1415 		switch (adev->ip_versions[DCE_HWIP][0]) {
1416 		case IP_VERSION(2, 1, 0):
1417 			init_data.flags.gpu_vm_support = true;
1418 			switch (adev->dm.dmcub_fw_version) {
1419 			case 0: /* development */
1420 			case 0x1: /* linux-firmware.git hash 6d9f399 */
1421 			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1422 				init_data.flags.disable_dmcu = false;
1423 				break;
1424 			default:
1425 				init_data.flags.disable_dmcu = true;
1426 			}
1427 			break;
1428 		case IP_VERSION(1, 0, 0):
1429 		case IP_VERSION(1, 0, 1):
1430 		case IP_VERSION(3, 0, 1):
1431 		case IP_VERSION(3, 1, 2):
1432 		case IP_VERSION(3, 1, 3):
1433 			init_data.flags.gpu_vm_support = true;
1434 			break;
1435 		case IP_VERSION(2, 0, 3):
1436 			init_data.flags.disable_dmcu = true;
1437 			break;
1438 		default:
1439 			break;
1440 		}
1441 		break;
1442 	}
1443 
1444 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1445 		init_data.flags.fbc_support = true;
1446 
1447 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1448 		init_data.flags.multi_mon_pp_mclk_switch = true;
1449 
1450 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1451 		init_data.flags.disable_fractional_pwm = true;
1452 
1453 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1454 		init_data.flags.edp_no_power_sequencing = true;
1455 
1456 	init_data.flags.power_down_display_on_boot = true;
1457 
1458 	INIT_LIST_HEAD(&adev->dm.da_list);
1459 	/* Display Core create. */
1460 	adev->dm.dc = dc_create(&init_data);
1461 
1462 	if (adev->dm.dc) {
1463 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1464 	} else {
1465 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1466 		goto error;
1467 	}
1468 
1469 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1470 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1471 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1472 	}
1473 
1474 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1475 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1476 	if (dm_should_disable_stutter(adev->pdev))
1477 		adev->dm.dc->debug.disable_stutter = true;
1478 
1479 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1480 		adev->dm.dc->debug.disable_stutter = true;
1481 
1482 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1483 		adev->dm.dc->debug.disable_dsc = true;
1484 
1485 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1486 		adev->dm.dc->debug.disable_clock_gate = true;
1487 
1488 	r = dm_dmub_hw_init(adev);
1489 	if (r) {
1490 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1491 		goto error;
1492 	}
1493 
1494 	dc_hardware_init(adev->dm.dc);
1495 
1496 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1497 	if (!adev->dm.hpd_rx_offload_wq) {
1498 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1499 		goto error;
1500 	}
1501 
1502 #if defined(CONFIG_DRM_AMD_DC_DCN)
1503 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1504 		struct dc_phy_addr_space_config pa_config;
1505 
1506 		mmhub_read_system_context(adev, &pa_config);
1507 
1508 		// Call the DC init_memory func
1509 		dc_setup_system_context(adev->dm.dc, &pa_config);
1510 	}
1511 #endif
1512 
1513 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1514 	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
1520 
1521 	amdgpu_dm_init_color_mod();
1522 
1523 #if defined(CONFIG_DRM_AMD_DC_DCN)
1524 	if (adev->dm.dc->caps.max_links > 0) {
1525 		adev->dm.vblank_control_workqueue =
1526 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1527 		if (!adev->dm.vblank_control_workqueue)
1528 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1529 	}
1530 #endif
1531 
1532 #ifdef CONFIG_DRM_AMD_DC_HDCP
1533 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1534 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1535 
1536 		if (!adev->dm.hdcp_workqueue)
1537 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1538 		else
1539 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1540 
1541 		dc_init_callbacks(adev->dm.dc, &init_params);
1542 	}
1543 #endif
1544 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1545 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1546 #endif
1547 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1548 		init_completion(&adev->dm.dmub_aux_transfer_done);
1549 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1550 		if (!adev->dm.dmub_notify) {
1551 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1552 			goto error;
1553 		}
1554 
1555 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1556 		if (!adev->dm.delayed_hpd_wq) {
1557 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1558 			goto error;
1559 		}
1560 
1561 		amdgpu_dm_outbox_init(adev);
1562 #if defined(CONFIG_DRM_AMD_DC_DCN)
1563 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1564 			dmub_aux_setconfig_callback, false)) {
1565 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1566 			goto error;
1567 		}
1568 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1569 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1570 			goto error;
1571 		}
1572 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1573 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1574 			goto error;
1575 		}
1576 #endif /* CONFIG_DRM_AMD_DC_DCN */
1577 	}
1578 
1579 	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1582 		goto error;
1583 	}
1584 
1585 	/* create fake encoders for MST */
1586 	dm_dp_create_fake_mst_encoders(adev);
1587 
1588 	/* TODO: Add_display_info? */
1589 
1590 	/* TODO use dynamic cursor width */
1591 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1592 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1593 
1594 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1597 		goto error;
1598 	}
1599 
1600 
1601 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1602 
1603 	return 0;
1604 error:
1605 	amdgpu_dm_fini(adev);
1606 
1607 	return -EINVAL;
1608 }
1609 
1610 static int amdgpu_dm_early_fini(void *handle)
1611 {
1612 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1613 
1614 	amdgpu_dm_audio_fini(adev);
1615 
1616 	return 0;
1617 }
1618 
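/*
 * Tear down everything created by amdgpu_dm_init(): workqueues, DRM display
 * structures, DMUB/DC services, firmware buffers and the freesync module.
 */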
1619 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1620 {
1621 	int i;
1622 
1623 #if defined(CONFIG_DRM_AMD_DC_DCN)
1624 	if (adev->dm.vblank_control_workqueue) {
1625 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1626 		adev->dm.vblank_control_workqueue = NULL;
1627 	}
1628 #endif
1629 
	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1633 
1634 	amdgpu_dm_destroy_drm_device(&adev->dm);
1635 
1636 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1637 	if (adev->dm.crc_rd_wrk) {
1638 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1639 		kfree(adev->dm.crc_rd_wrk);
1640 		adev->dm.crc_rd_wrk = NULL;
1641 	}
1642 #endif
1643 #ifdef CONFIG_DRM_AMD_DC_HDCP
1644 	if (adev->dm.hdcp_workqueue) {
1645 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1646 		adev->dm.hdcp_workqueue = NULL;
1647 	}
1648 
1649 	if (adev->dm.dc)
1650 		dc_deinit_callbacks(adev->dm.dc);
1651 #endif
1652 
1653 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1654 
1655 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1656 		kfree(adev->dm.dmub_notify);
1657 		adev->dm.dmub_notify = NULL;
1658 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1659 		adev->dm.delayed_hpd_wq = NULL;
1660 	}
1661 
1662 	if (adev->dm.dmub_bo)
1663 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1664 				      &adev->dm.dmub_bo_gpu_addr,
1665 				      &adev->dm.dmub_bo_cpu_addr);
1666 
1667 	if (adev->dm.hpd_rx_offload_wq) {
1668 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1669 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1670 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1671 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1672 			}
1673 		}
1674 
1675 		kfree(adev->dm.hpd_rx_offload_wq);
1676 		adev->dm.hpd_rx_offload_wq = NULL;
1677 	}
1678 
1679 	/* DC Destroy TODO: Replace destroy DAL */
1680 	if (adev->dm.dc)
1681 		dc_destroy(&adev->dm.dc);
1682 	/*
	 * TODO: pageflip, vblank interrupt
1684 	 *
1685 	 * amdgpu_dm_irq_fini(adev);
1686 	 */
1687 
1688 	if (adev->dm.cgs_device) {
1689 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1690 		adev->dm.cgs_device = NULL;
1691 	}
1692 	if (adev->dm.freesync_module) {
1693 		mod_freesync_destroy(adev->dm.freesync_module);
1694 		adev->dm.freesync_module = NULL;
1695 	}
1696 
1697 	mutex_destroy(&adev->dm.audio_lock);
1698 	mutex_destroy(&adev->dm.dc_lock);
1701 }
1702 
1703 static int load_dmcu_fw(struct amdgpu_device *adev)
1704 {
1705 	const char *fw_name_dmcu = NULL;
1706 	int r;
1707 	const struct dmcu_firmware_header_v1_0 *hdr;
1708 
	switch (adev->asic_type) {
1710 #if defined(CONFIG_DRM_AMD_DC_SI)
1711 	case CHIP_TAHITI:
1712 	case CHIP_PITCAIRN:
1713 	case CHIP_VERDE:
1714 	case CHIP_OLAND:
1715 #endif
1716 	case CHIP_BONAIRE:
1717 	case CHIP_HAWAII:
1718 	case CHIP_KAVERI:
1719 	case CHIP_KABINI:
1720 	case CHIP_MULLINS:
1721 	case CHIP_TONGA:
1722 	case CHIP_FIJI:
1723 	case CHIP_CARRIZO:
1724 	case CHIP_STONEY:
1725 	case CHIP_POLARIS11:
1726 	case CHIP_POLARIS10:
1727 	case CHIP_POLARIS12:
1728 	case CHIP_VEGAM:
1729 	case CHIP_VEGA10:
1730 	case CHIP_VEGA12:
1731 	case CHIP_VEGA20:
1732 		return 0;
1733 	case CHIP_NAVI12:
1734 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1735 		break;
1736 	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
1743 		break;
1744 	default:
1745 		switch (adev->ip_versions[DCE_HWIP][0]) {
1746 		case IP_VERSION(2, 0, 2):
1747 		case IP_VERSION(2, 0, 3):
1748 		case IP_VERSION(2, 0, 0):
1749 		case IP_VERSION(2, 1, 0):
1750 		case IP_VERSION(3, 0, 0):
1751 		case IP_VERSION(3, 0, 2):
1752 		case IP_VERSION(3, 0, 3):
1753 		case IP_VERSION(3, 0, 1):
1754 		case IP_VERSION(3, 1, 2):
1755 		case IP_VERSION(3, 1, 3):
1756 			return 0;
1757 		default:
1758 			break;
1759 		}
1760 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1761 		return -EINVAL;
1762 	}
1763 
1764 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1765 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1766 		return 0;
1767 	}
1768 
1769 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1770 	if (r == -ENOENT) {
1771 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1772 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1773 		adev->dm.fw_dmcu = NULL;
1774 		return 0;
1775 	}
1776 	if (r) {
1777 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1778 			fw_name_dmcu);
1779 		return r;
1780 	}
1781 
1782 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1783 	if (r) {
1784 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1785 			fw_name_dmcu);
1786 		release_firmware(adev->dm.fw_dmcu);
1787 		adev->dm.fw_dmcu = NULL;
1788 		return r;
1789 	}
1790 
1791 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1792 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1793 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1794 	adev->firmware.fw_size +=
1795 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1796 
1797 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1798 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1799 	adev->firmware.fw_size +=
1800 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1801 
1802 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1803 
1804 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1805 
1806 	return 0;
1807 }
1808 
1809 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1810 {
1811 	struct amdgpu_device *adev = ctx;
1812 
1813 	return dm_read_reg(adev->dm.dc->ctx, address);
1814 }
1815 
1816 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1817 				     uint32_t value)
1818 {
1819 	struct amdgpu_device *adev = ctx;
1820 
1821 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1822 }
1823 
1824 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1825 {
1826 	struct dmub_srv_create_params create_params;
1827 	struct dmub_srv_region_params region_params;
1828 	struct dmub_srv_region_info region_info;
1829 	struct dmub_srv_fb_params fb_params;
1830 	struct dmub_srv_fb_info *fb_info;
1831 	struct dmub_srv *dmub_srv;
1832 	const struct dmcub_firmware_header_v1_0 *hdr;
1833 	const char *fw_name_dmub;
1834 	enum dmub_asic dmub_asic;
1835 	enum dmub_status status;
1836 	int r;
1837 
1838 	switch (adev->ip_versions[DCE_HWIP][0]) {
1839 	case IP_VERSION(2, 1, 0):
1840 		dmub_asic = DMUB_ASIC_DCN21;
1841 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1842 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1843 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1844 		break;
1845 	case IP_VERSION(3, 0, 0):
1846 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1847 			dmub_asic = DMUB_ASIC_DCN30;
1848 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1849 		} else {
1850 			dmub_asic = DMUB_ASIC_DCN30;
1851 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1852 		}
1853 		break;
1854 	case IP_VERSION(3, 0, 1):
1855 		dmub_asic = DMUB_ASIC_DCN301;
1856 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1857 		break;
1858 	case IP_VERSION(3, 0, 2):
1859 		dmub_asic = DMUB_ASIC_DCN302;
1860 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1861 		break;
1862 	case IP_VERSION(3, 0, 3):
1863 		dmub_asic = DMUB_ASIC_DCN303;
1864 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1865 		break;
1866 	case IP_VERSION(3, 1, 2):
1867 	case IP_VERSION(3, 1, 3):
1868 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1869 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1870 		break;
1871 
1872 	default:
1873 		/* ASIC doesn't support DMUB. */
1874 		return 0;
1875 	}
1876 
1877 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1878 	if (r) {
1879 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1880 		return 0;
1881 	}
1882 
1883 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1884 	if (r) {
1885 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1886 		return 0;
1887 	}
1888 
1889 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1890 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1891 
1892 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1893 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1894 			AMDGPU_UCODE_ID_DMCUB;
1895 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1896 			adev->dm.dmub_fw;
1897 		adev->firmware.fw_size +=
1898 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1899 
1900 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1901 			 adev->dm.dmcub_fw_version);
1902 	}
1903 
1904 
1905 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1906 	dmub_srv = adev->dm.dmub_srv;
1907 
1908 	if (!dmub_srv) {
1909 		DRM_ERROR("Failed to allocate DMUB service!\n");
1910 		return -ENOMEM;
1911 	}
1912 
1913 	memset(&create_params, 0, sizeof(create_params));
1914 	create_params.user_ctx = adev;
1915 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1916 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1917 	create_params.asic = dmub_asic;
1918 
1919 	/* Create the DMUB service. */
1920 	status = dmub_srv_create(dmub_srv, &create_params);
1921 	if (status != DMUB_STATUS_OK) {
1922 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1923 		return -EINVAL;
1924 	}
1925 
1926 	/* Calculate the size of all the regions for the DMUB service. */
1927 	memset(&region_params, 0, sizeof(region_params));
1928 
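	/* The firmware's instruction-constant section is wrapped by a PSP header
	 * and footer, so subtract both when sizing the region and skip the
	 * header when locating fw_inst_const within the firmware data.
	 */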
1929 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1930 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1931 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1932 	region_params.vbios_size = adev->bios_size;
1933 	region_params.fw_bss_data = region_params.bss_data_size ?
1934 		adev->dm.dmub_fw->data +
1935 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1936 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1937 	region_params.fw_inst_const =
1938 		adev->dm.dmub_fw->data +
1939 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1940 		PSP_HEADER_BYTES;
1941 
1942 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1943 					   &region_info);
1944 
1945 	if (status != DMUB_STATUS_OK) {
1946 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1947 		return -EINVAL;
1948 	}
1949 
1950 	/*
1951 	 * Allocate a framebuffer based on the total size of all the regions.
1952 	 * TODO: Move this into GART.
1953 	 */
1954 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1955 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1956 				    &adev->dm.dmub_bo_gpu_addr,
1957 				    &adev->dm.dmub_bo_cpu_addr);
1958 	if (r)
1959 		return r;
1960 
1961 	/* Rebase the regions on the framebuffer address. */
1962 	memset(&fb_params, 0, sizeof(fb_params));
1963 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1964 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1965 	fb_params.region_info = &region_info;
1966 
1967 	adev->dm.dmub_fb_info =
1968 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1969 	fb_info = adev->dm.dmub_fb_info;
1970 
1971 	if (!fb_info) {
1972 		DRM_ERROR(
1973 			"Failed to allocate framebuffer info for DMUB service!\n");
1974 		return -ENOMEM;
1975 	}
1976 
1977 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1978 	if (status != DMUB_STATUS_OK) {
1979 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1980 		return -EINVAL;
1981 	}
1982 
1983 	return 0;
1984 }
1985 
1986 static int dm_sw_init(void *handle)
1987 {
1988 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1989 	int r;
1990 
1991 	r = dm_dmub_sw_init(adev);
1992 	if (r)
1993 		return r;
1994 
1995 	return load_dmcu_fw(adev);
1996 }
1997 
1998 static int dm_sw_fini(void *handle)
1999 {
2000 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2001 
2002 	kfree(adev->dm.dmub_fb_info);
2003 	adev->dm.dmub_fb_info = NULL;
2004 
2005 	if (adev->dm.dmub_srv) {
2006 		dmub_srv_destroy(adev->dm.dmub_srv);
2007 		adev->dm.dmub_srv = NULL;
2008 	}
2009 
2010 	release_firmware(adev->dm.dmub_fw);
2011 	adev->dm.dmub_fw = NULL;
2012 
2013 	release_firmware(adev->dm.fw_dmcu);
2014 	adev->dm.fw_dmcu = NULL;
2015 
2016 	return 0;
2017 }
2018 
2019 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2020 {
2021 	struct amdgpu_dm_connector *aconnector;
2022 	struct drm_connector *connector;
2023 	struct drm_connector_list_iter iter;
2024 	int ret = 0;
2025 
2026 	drm_connector_list_iter_begin(dev, &iter);
2027 	drm_for_each_connector_iter(connector, &iter) {
2028 		aconnector = to_amdgpu_dm_connector(connector);
2029 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2030 		    aconnector->mst_mgr.aux) {
2031 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2032 					 aconnector,
2033 					 aconnector->base.base.id);
2034 
2035 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2036 			if (ret < 0) {
2037 				DRM_ERROR("DM_MST: Failed to start MST\n");
2038 				aconnector->dc_link->type =
2039 					dc_connection_single;
2040 				break;
2041 			}
2042 		}
2043 	}
2044 	drm_connector_list_iter_end(&iter);
2045 
2046 	return ret;
2047 }
2048 
2049 static int dm_late_init(void *handle)
2050 {
2051 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2052 
2053 	struct dmcu_iram_parameters params;
2054 	unsigned int linear_lut[16];
2055 	int i;
2056 	struct dmcu *dmcu = NULL;
2057 
2058 	dmcu = adev->dm.dc->res_pool->dmcu;
2059 
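	/* Build a 16-entry linear backlight LUT ramping from 0 to 0xFFFF
	 * (for example, linear_lut[1] = 0xFFFF * 1 / 15 = 4369).
	 */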
2060 	for (i = 0; i < 16; i++)
2061 		linear_lut[i] = 0xFFFF * i / 15;
2062 
2063 	params.set = 0;
2064 	params.backlight_ramping_override = false;
2065 	params.backlight_ramping_start = 0xCCCC;
2066 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2067 	params.backlight_lut_array_size = 16;
2068 	params.backlight_lut_array = linear_lut;
2069 
	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
2072 	 */
2073 	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on DMCUB,
	 * the DMCU object will be NULL.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
2078 	if (dmcu) {
2079 		if (!dmcu_load_iram(dmcu, params))
2080 			return -EINVAL;
2081 	} else if (adev->dm.dc->ctx->dmub_srv) {
2082 		struct dc_link *edp_links[MAX_NUM_EDP];
2083 		int edp_num;
2084 
2085 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2086 		for (i = 0; i < edp_num; i++) {
2087 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2088 				return -EINVAL;
2089 		}
2090 	}
2091 
2092 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2093 }
2094 
2095 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2096 {
2097 	struct amdgpu_dm_connector *aconnector;
2098 	struct drm_connector *connector;
2099 	struct drm_connector_list_iter iter;
2100 	struct drm_dp_mst_topology_mgr *mgr;
2101 	int ret;
2102 	bool need_hotplug = false;
2103 
2104 	drm_connector_list_iter_begin(dev, &iter);
2105 	drm_for_each_connector_iter(connector, &iter) {
2106 		aconnector = to_amdgpu_dm_connector(connector);
2107 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2108 		    aconnector->mst_port)
2109 			continue;
2110 
2111 		mgr = &aconnector->mst_mgr;
2112 
2113 		if (suspend) {
2114 			drm_dp_mst_topology_mgr_suspend(mgr);
2115 		} else {
2116 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2117 			if (ret < 0) {
2118 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2119 				need_hotplug = true;
2120 			}
2121 		}
2122 	}
2123 	drm_connector_list_iter_end(&iter);
2124 
2125 	if (need_hotplug)
2126 		drm_kms_helper_hotplug_event(dev);
2127 }
2128 
2129 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2130 {
2131 	struct smu_context *smu = &adev->smu;
2132 	int ret = 0;
2133 
2134 	if (!is_support_sw_smu(adev))
2135 		return 0;
2136 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver DC implementation.
	 * For Navi1x, the clock settings of the DCN watermarks are fixed; the
	 * settings should be passed to SMU during boot up and on resume from S3.
	 * Boot up: DC calculates the DCN watermark clock settings within
	 * dc_create / dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to SMU:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the DCN watermarks are also fixed
	 * values. DC has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
2167 	switch (adev->ip_versions[DCE_HWIP][0]) {
2168 	case IP_VERSION(2, 0, 2):
2169 	case IP_VERSION(2, 0, 0):
2170 		break;
2171 	default:
2172 		return 0;
2173 	}
2174 
2175 	ret = smu_write_watermarks_table(smu);
2176 	if (ret) {
2177 		DRM_ERROR("Failed to update WMTABLE!\n");
2178 		return ret;
2179 	}
2180 
2181 	return 0;
2182 }
2183 
2184 /**
2185  * dm_hw_init() - Initialize DC device
2186  * @handle: The base driver device containing the amdgpu_dm device.
2187  *
2188  * Initialize the &struct amdgpu_display_manager device. This involves calling
2189  * the initializers of each DM component, then populating the struct with them.
2190  *
2191  * Although the function implies hardware initialization, both hardware and
2192  * software are initialized here. Splitting them out to their relevant init
2193  * hooks is a future TODO item.
2194  *
2195  * Some notable things that are initialized here:
2196  *
2197  * - Display Core, both software and hardware
2198  * - DC modules that we need (freesync and color management)
2199  * - DRM software states
2200  * - Interrupt sources and handlers
2201  * - Vblank support
2202  * - Debug FS entries, if enabled
2203  */
2204 static int dm_hw_init(void *handle)
2205 {
2206 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2207 	/* Create DAL display manager */
2208 	amdgpu_dm_init(adev);
2209 	amdgpu_dm_hpd_init(adev);
2210 
2211 	return 0;
2212 }
2213 
2214 /**
2215  * dm_hw_fini() - Teardown DC device
2216  * @handle: The base driver device containing the amdgpu_dm device.
2217  *
2218  * Teardown components within &struct amdgpu_display_manager that require
2219  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2220  * were loaded. Also flush IRQ workqueues and disable them.
2221  */
2222 static int dm_hw_fini(void *handle)
2223 {
2224 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2225 
2226 	amdgpu_dm_hpd_fini(adev);
2227 
2228 	amdgpu_dm_irq_fini(adev);
2229 	amdgpu_dm_fini(adev);
2230 	return 0;
2231 }
2232 
2233 
2234 static int dm_enable_vblank(struct drm_crtc *crtc);
2235 static void dm_disable_vblank(struct drm_crtc *crtc);
2236 
2237 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2238 				 struct dc_state *state, bool enable)
2239 {
2240 	enum dc_irq_source irq_source;
2241 	struct amdgpu_crtc *acrtc;
2242 	int rc = -EBUSY;
2243 	int i = 0;
2244 
2245 	for (i = 0; i < state->stream_count; i++) {
2246 		acrtc = get_crtc_by_otg_inst(
2247 				adev, state->stream_status[i].primary_otg_inst);
2248 
2249 		if (acrtc && state->stream_status[i].plane_count != 0) {
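			/* Each OTG instance has its own pageflip IRQ source,
			 * offset from IRQ_TYPE_PFLIP by the OTG index.
			 */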
2250 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2251 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2253 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2254 			if (rc)
2255 				DRM_WARN("Failed to %s pflip interrupts\n",
2256 					 enable ? "enable" : "disable");
2257 
2258 			if (enable) {
2259 				rc = dm_enable_vblank(&acrtc->base);
2260 				if (rc)
2261 					DRM_WARN("Failed to enable vblank interrupts\n");
2262 			} else {
2263 				dm_disable_vblank(&acrtc->base);
2264 			}
2265 
2266 		}
2267 	}
2268 
2269 }
2270 
2271 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2272 {
2273 	struct dc_state *context = NULL;
2274 	enum dc_status res = DC_ERROR_UNEXPECTED;
2275 	int i;
2276 	struct dc_stream_state *del_streams[MAX_PIPES];
2277 	int del_streams_count = 0;
2278 
2279 	memset(del_streams, 0, sizeof(del_streams));
2280 
2281 	context = dc_create_state(dc);
2282 	if (context == NULL)
2283 		goto context_alloc_fail;
2284 
2285 	dc_resource_state_copy_construct_current(dc, context);
2286 
2287 	/* First remove from context all streams */
2288 	for (i = 0; i < context->stream_count; i++) {
2289 		struct dc_stream_state *stream = context->streams[i];
2290 
2291 		del_streams[del_streams_count++] = stream;
2292 	}
2293 
2294 	/* Remove all planes for removed streams and then remove the streams */
2295 	for (i = 0; i < del_streams_count; i++) {
2296 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2297 			res = DC_FAIL_DETACH_SURFACES;
2298 			goto fail;
2299 		}
2300 
2301 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2302 		if (res != DC_OK)
2303 			goto fail;
2304 	}
2305 
2306 
2307 	res = dc_validate_global_state(dc, context, false);
2308 
2309 	if (res != DC_OK) {
2310 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2311 		goto fail;
2312 	}
2313 
2314 	res = dc_commit_state(dc, context);
2315 
2316 fail:
2317 	dc_release_state(context);
2318 
2319 context_alloc_fail:
2320 	return res;
2321 }
2322 
2323 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2324 {
2325 	int i;
2326 
2327 	if (dm->hpd_rx_offload_wq) {
2328 		for (i = 0; i < dm->dc->caps.max_links; i++)
2329 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2330 	}
2331 }
2332 
2333 static int dm_suspend(void *handle)
2334 {
2335 	struct amdgpu_device *adev = handle;
2336 	struct amdgpu_display_manager *dm = &adev->dm;
2337 	int ret = 0;
2338 
2339 	if (amdgpu_in_reset(adev)) {
2340 		mutex_lock(&dm->dc_lock);
2341 
2342 #if defined(CONFIG_DRM_AMD_DC_DCN)
2343 		dc_allow_idle_optimizations(adev->dm.dc, false);
2344 #endif
2345 
2346 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2347 
2348 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2349 
2350 		amdgpu_dm_commit_zero_streams(dm->dc);
2351 
2352 		amdgpu_dm_irq_suspend(adev);
2353 
2354 		hpd_rx_irq_work_suspend(dm);
2355 
2356 		return ret;
2357 	}
2358 
2359 	WARN_ON(adev->dm.cached_state);
2360 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2361 
2362 	s3_handle_mst(adev_to_drm(adev), true);
2363 
2364 	amdgpu_dm_irq_suspend(adev);
2365 
2366 	hpd_rx_irq_work_suspend(dm);
2367 
2368 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2369 
2370 	return 0;
2371 }
2372 
2373 static struct amdgpu_dm_connector *
2374 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2375 					     struct drm_crtc *crtc)
2376 {
2377 	uint32_t i;
2378 	struct drm_connector_state *new_con_state;
2379 	struct drm_connector *connector;
2380 	struct drm_crtc *crtc_from_state;
2381 
2382 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2383 		crtc_from_state = new_con_state->crtc;
2384 
2385 		if (crtc_from_state == crtc)
2386 			return to_amdgpu_dm_connector(connector);
2387 	}
2388 
2389 	return NULL;
2390 }
2391 
2392 static void emulated_link_detect(struct dc_link *link)
2393 {
2394 	struct dc_sink_init_data sink_init_data = { 0 };
2395 	struct display_sink_capability sink_caps = { 0 };
2396 	enum dc_edid_status edid_status;
2397 	struct dc_context *dc_ctx = link->ctx;
2398 	struct dc_sink *sink = NULL;
2399 	struct dc_sink *prev_sink = NULL;
2400 
2401 	link->type = dc_connection_none;
2402 	prev_sink = link->local_sink;
2403 
2404 	if (prev_sink)
2405 		dc_sink_release(prev_sink);
2406 
2407 	switch (link->connector_signal) {
2408 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2409 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2410 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2411 		break;
2412 	}
2413 
2414 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2415 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2416 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2417 		break;
2418 	}
2419 
2420 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2421 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2422 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2423 		break;
2424 	}
2425 
2426 	case SIGNAL_TYPE_LVDS: {
2427 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2428 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2429 		break;
2430 	}
2431 
2432 	case SIGNAL_TYPE_EDP: {
2433 		sink_caps.transaction_type =
2434 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2435 		sink_caps.signal = SIGNAL_TYPE_EDP;
2436 		break;
2437 	}
2438 
2439 	case SIGNAL_TYPE_DISPLAY_PORT: {
2440 		sink_caps.transaction_type =
2441 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2442 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2443 		break;
2444 	}
2445 
2446 	default:
2447 		DC_ERROR("Invalid connector type! signal:%d\n",
2448 			link->connector_signal);
2449 		return;
2450 	}
2451 
2452 	sink_init_data.link = link;
2453 	sink_init_data.sink_signal = sink_caps.signal;
2454 
2455 	sink = dc_sink_create(&sink_init_data);
2456 	if (!sink) {
2457 		DC_ERROR("Failed to create sink!\n");
2458 		return;
2459 	}
2460 
2461 	/* dc_sink_create returns a new reference */
2462 	link->local_sink = sink;
2463 
2464 	edid_status = dm_helpers_read_local_edid(
2465 			link->ctx,
2466 			link,
2467 			sink);
2468 
2469 	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
2472 }
2473 
2474 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2475 				     struct amdgpu_display_manager *dm)
2476 {
2477 	struct {
2478 		struct dc_surface_update surface_updates[MAX_SURFACES];
2479 		struct dc_plane_info plane_infos[MAX_SURFACES];
2480 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2481 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2482 		struct dc_stream_update stream_update;
	} *bundle;
2484 	int k, m;
2485 
2486 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2487 
2488 	if (!bundle) {
2489 		dm_error("Failed to allocate update bundle\n");
2490 		goto cleanup;
2491 	}
2492 
2493 	for (k = 0; k < dc_state->stream_count; k++) {
2494 		bundle->stream_update.stream = dc_state->streams[k];
2495 
2496 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2497 			bundle->surface_updates[m].surface =
2498 				dc_state->stream_status->plane_states[m];
2499 			bundle->surface_updates[m].surface->force_full_update =
2500 				true;
2501 		}
2502 		dc_commit_updates_for_stream(
2503 			dm->dc, bundle->surface_updates,
2504 			dc_state->stream_status->plane_count,
2505 			dc_state->streams[k], &bundle->stream_update, dc_state);
2506 	}
2507 
2508 cleanup:
2509 	kfree(bundle);
2510 
2511 	return;
2512 }
2513 
2514 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2515 {
2516 	struct dc_stream_state *stream_state;
2517 	struct amdgpu_dm_connector *aconnector = link->priv;
2518 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2519 	struct dc_stream_update stream_update;
2520 	bool dpms_off = true;
2521 
2522 	memset(&stream_update, 0, sizeof(stream_update));
2523 	stream_update.dpms_off = &dpms_off;
2524 
2525 	mutex_lock(&adev->dm.dc_lock);
2526 	stream_state = dc_stream_find_from_link(link);
2527 
2528 	if (stream_state == NULL) {
2529 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2530 		mutex_unlock(&adev->dm.dc_lock);
2531 		return;
2532 	}
2533 
2534 	stream_update.stream = stream_state;
2535 	acrtc_state->force_dpms_off = true;
2536 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2537 				     stream_state, &stream_update,
2538 				     stream_state->ctx->dc->current_state);
2539 	mutex_unlock(&adev->dm.dc_lock);
2540 }
2541 
2542 static int dm_resume(void *handle)
2543 {
2544 	struct amdgpu_device *adev = handle;
2545 	struct drm_device *ddev = adev_to_drm(adev);
2546 	struct amdgpu_display_manager *dm = &adev->dm;
2547 	struct amdgpu_dm_connector *aconnector;
2548 	struct drm_connector *connector;
2549 	struct drm_connector_list_iter iter;
2550 	struct drm_crtc *crtc;
2551 	struct drm_crtc_state *new_crtc_state;
2552 	struct dm_crtc_state *dm_new_crtc_state;
2553 	struct drm_plane *plane;
2554 	struct drm_plane_state *new_plane_state;
2555 	struct dm_plane_state *dm_new_plane_state;
2556 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2557 	enum dc_connection_type new_connection_type = dc_connection_none;
2558 	struct dc_state *dc_state;
2559 	int i, r, j;
2560 
2561 	if (amdgpu_in_reset(adev)) {
2562 		dc_state = dm->cached_dc_state;
2563 
2564 		r = dm_dmub_hw_init(adev);
2565 		if (r)
2566 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2567 
2568 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2569 		dc_resume(dm->dc);
2570 
2571 		amdgpu_dm_irq_resume_early(adev);
2572 
2573 		for (i = 0; i < dc_state->stream_count; i++) {
2574 			dc_state->streams[i]->mode_changed = true;
2575 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2576 				dc_state->stream_status->plane_states[j]->update_flags.raw
2577 					= 0xffffffff;
2578 			}
2579 		}
2580 #if defined(CONFIG_DRM_AMD_DC_DCN)
2581 		/*
2582 		 * Resource allocation happens for link encoders for newer ASIC in
2583 		 * dc_validate_global_state, so we need to revalidate it.
2584 		 *
2585 		 * This shouldn't fail (it passed once before), so warn if it does.
2586 		 */
2587 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2588 #endif
2589 
2590 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2591 
2592 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2593 
2594 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2595 
2596 		dc_release_state(dm->cached_dc_state);
2597 		dm->cached_dc_state = NULL;
2598 
2599 		amdgpu_dm_irq_resume_late(adev);
2600 
2601 		mutex_unlock(&dm->dc_lock);
2602 
2603 		return 0;
2604 	}
2605 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2606 	dc_release_state(dm_state->context);
2607 	dm_state->context = dc_create_state(dm->dc);
2608 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2609 	dc_resource_state_construct(dm->dc, dm_state->context);
2610 
2611 	/* Before powering on DC we need to re-initialize DMUB. */
2612 	r = dm_dmub_hw_init(adev);
2613 	if (r)
2614 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2615 
2616 	/* power on hardware */
2617 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2618 
2619 	/* program HPD filter */
2620 	dc_resume(dm->dc);
2621 
2622 	/*
2623 	 * early enable HPD Rx IRQ, should be done before set mode as short
2624 	 * pulse interrupts are used for MST
2625 	 */
2626 	amdgpu_dm_irq_resume_early(adev);
2627 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2629 	s3_handle_mst(ddev, false);
2630 
	/* Do detection */
2632 	drm_connector_list_iter_begin(ddev, &iter);
2633 	drm_for_each_connector_iter(connector, &iter) {
2634 		aconnector = to_amdgpu_dm_connector(connector);
2635 
2636 		/*
2637 		 * this is the case when traversing through already created
2638 		 * MST connectors, should be skipped
2639 		 */
2640 		if (aconnector->mst_port)
2641 			continue;
2642 
2643 		mutex_lock(&aconnector->hpd_lock);
2644 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2645 			DRM_ERROR("KMS: Failed to detect connector\n");
2646 
2647 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2648 			emulated_link_detect(aconnector->dc_link);
2649 		else
2650 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2651 
2652 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2653 			aconnector->fake_enable = false;
2654 
2655 		if (aconnector->dc_sink)
2656 			dc_sink_release(aconnector->dc_sink);
2657 		aconnector->dc_sink = NULL;
2658 		amdgpu_dm_update_connector_after_detect(aconnector);
2659 		mutex_unlock(&aconnector->hpd_lock);
2660 	}
2661 	drm_connector_list_iter_end(&iter);
2662 
2663 	/* Force mode set in atomic commit */
2664 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2665 		new_crtc_state->active_changed = true;
2666 
2667 	/*
2668 	 * atomic_check is expected to create the dc states. We need to release
2669 	 * them here, since they were duplicated as part of the suspend
2670 	 * procedure.
2671 	 */
2672 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2673 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2674 		if (dm_new_crtc_state->stream) {
2675 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2676 			dc_stream_release(dm_new_crtc_state->stream);
2677 			dm_new_crtc_state->stream = NULL;
2678 		}
2679 	}
2680 
2681 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2682 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2683 		if (dm_new_plane_state->dc_state) {
2684 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2685 			dc_plane_state_release(dm_new_plane_state->dc_state);
2686 			dm_new_plane_state->dc_state = NULL;
2687 		}
2688 	}
2689 
2690 	drm_atomic_helper_resume(ddev, dm->cached_state);
2691 
2692 	dm->cached_state = NULL;
2693 
2694 	amdgpu_dm_irq_resume_late(adev);
2695 
2696 	amdgpu_dm_smu_write_watermarks_table(adev);
2697 
2698 	return 0;
2699 }
2700 
2701 /**
2702  * DOC: DM Lifecycle
2703  *
2704  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2705  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2706  * the base driver's device list to be initialized and torn down accordingly.
2707  *
2708  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2709  */
2710 
2711 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2712 	.name = "dm",
2713 	.early_init = dm_early_init,
2714 	.late_init = dm_late_init,
2715 	.sw_init = dm_sw_init,
2716 	.sw_fini = dm_sw_fini,
2717 	.early_fini = amdgpu_dm_early_fini,
2718 	.hw_init = dm_hw_init,
2719 	.hw_fini = dm_hw_fini,
2720 	.suspend = dm_suspend,
2721 	.resume = dm_resume,
2722 	.is_idle = dm_is_idle,
2723 	.wait_for_idle = dm_wait_for_idle,
2724 	.check_soft_reset = dm_check_soft_reset,
2725 	.soft_reset = dm_soft_reset,
2726 	.set_clockgating_state = dm_set_clockgating_state,
2727 	.set_powergating_state = dm_set_powergating_state,
2728 };
2729 
2730 const struct amdgpu_ip_block_version dm_ip_block =
2731 {
2732 	.type = AMD_IP_BLOCK_TYPE_DCE,
2733 	.major = 1,
2734 	.minor = 0,
2735 	.rev = 0,
2736 	.funcs = &amdgpu_dm_funcs,
2737 };
2738 
2739 
2740 /**
2741  * DOC: atomic
2742  *
2743  * *WIP*
2744  */
2745 
2746 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2747 	.fb_create = amdgpu_display_user_framebuffer_create,
2748 	.get_format_info = amd_get_format_info,
2749 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2750 	.atomic_check = amdgpu_dm_atomic_check,
2751 	.atomic_commit = drm_atomic_helper_commit,
2752 };
2753 
2754 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2755 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2756 };
2757 
2758 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2759 {
2760 	u32 max_cll, min_cll, max, min, q, r;
2761 	struct amdgpu_dm_backlight_caps *caps;
2762 	struct amdgpu_display_manager *dm;
2763 	struct drm_connector *conn_base;
2764 	struct amdgpu_device *adev;
2765 	struct dc_link *link = NULL;
2766 	static const u8 pre_computed_values[] = {
2767 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2768 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2769 	int i;
2770 
2771 	if (!aconnector || !aconnector->dc_link)
2772 		return;
2773 
2774 	link = aconnector->dc_link;
2775 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2776 		return;
2777 
2778 	conn_base = &aconnector->base;
2779 	adev = drm_to_adev(conn_base->dev);
2780 	dm = &adev->dm;
2781 	for (i = 0; i < dm->num_of_edps; i++) {
2782 		if (link == dm->backlight_link[i])
2783 			break;
2784 	}
2785 	if (i >= dm->num_of_edps)
2786 		return;
2787 	caps = &dm->backlight_caps[i];
2788 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2789 	caps->aux_support = false;
2790 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2791 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2792 
2793 	if (caps->ext_caps->bits.oled == 1 /*||
2794 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2795 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2796 		caps->aux_support = true;
2797 
2798 	if (amdgpu_backlight == 0)
2799 		caps->aux_support = false;
2800 	else if (amdgpu_backlight == 1)
2801 		caps->aux_support = true;
2802 
	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating-point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. By Euclid's division algorithm, CV
	 * can be written as CV = 32*q + r. Substituting CV into the luminance
	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
	 * pre-compute the values of 50*(2**(r/32)). The pre-computed values
	 * were generated with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and can be verified against pre_computed_values.
	 */
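	/* Illustrative example (arithmetic only, not from the spec): for
	 * max_cll = 70 we get q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, matching
	 * round(50 * 2**(70/32)) = 228.
	 */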
2818 	q = max_cll >> 5;
2819 	r = max_cll % 32;
2820 	max = (1 << q) * pre_computed_values[r];
2821 
2822 	// min luminance: maxLum * (CV/255)^2 / 100
2823 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2824 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2825 
2826 	caps->aux_max_input_signal = max;
2827 	caps->aux_min_input_signal = min;
2828 }
2829 
2830 void amdgpu_dm_update_connector_after_detect(
2831 		struct amdgpu_dm_connector *aconnector)
2832 {
2833 	struct drm_connector *connector = &aconnector->base;
2834 	struct drm_device *dev = connector->dev;
2835 	struct dc_sink *sink;
2836 
2837 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2839 		return;
2840 
2841 	sink = aconnector->dc_link->local_sink;
2842 	if (sink)
2843 		dc_sink_retain(sink);
2844 
2845 	/*
	 * EDID mgmt connector gets its first update only in the mode_valid hook;
	 * then the connector sink is set to either a fake or a physical sink,
	 * depending on link status. Skip if already done during boot.
2849 	 */
2850 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2851 			&& aconnector->dc_em_sink) {
2852 
2853 		/*
		 * For S3 resume with headless use the emulated sink (dc_em_sink)
		 * to fake the stream, because connector->sink is set to NULL on resume.
2856 		 */
2857 		mutex_lock(&dev->mode_config.mutex);
2858 
2859 		if (sink) {
2860 			if (aconnector->dc_sink) {
2861 				amdgpu_dm_update_freesync_caps(connector, NULL);
2862 				/*
				 * retain and release below are used to bump up the
				 * refcount of the sink because the link no longer
				 * points to it after disconnect; otherwise, on the
				 * next crtc-to-connector reshuffle by the UMD, we
				 * would hit an unwanted dc_sink release.
2867 				 */
2868 				dc_sink_release(aconnector->dc_sink);
2869 			}
2870 			aconnector->dc_sink = sink;
2871 			dc_sink_retain(aconnector->dc_sink);
2872 			amdgpu_dm_update_freesync_caps(connector,
2873 					aconnector->edid);
2874 		} else {
2875 			amdgpu_dm_update_freesync_caps(connector, NULL);
2876 			if (!aconnector->dc_sink) {
2877 				aconnector->dc_sink = aconnector->dc_em_sink;
2878 				dc_sink_retain(aconnector->dc_sink);
2879 			}
2880 		}
2881 
2882 		mutex_unlock(&dev->mode_config.mutex);
2883 
2884 		if (sink)
2885 			dc_sink_release(sink);
2886 		return;
2887 	}
2888 
2889 	/*
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
2892 	 */
2893 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2894 		dc_sink_release(sink);
2895 		return;
2896 	}
2897 
2898 	if (aconnector->dc_sink == sink) {
2899 		/*
2900 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2901 		 * Do nothing!!
2902 		 */
2903 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2904 				aconnector->connector_id);
2905 		if (sink)
2906 			dc_sink_release(sink);
2907 		return;
2908 	}
2909 
2910 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2911 		aconnector->connector_id, aconnector->dc_sink, sink);
2912 
2913 	mutex_lock(&dev->mode_config.mutex);
2914 
2915 	/*
2916 	 * 1. Update status of the drm connector
2917 	 * 2. Send an event and let userspace tell us what to do
2918 	 */
2919 	if (sink) {
2920 		/*
2921 		 * TODO: check if we still need the S3 mode update workaround.
2922 		 * If yes, put it here.
2923 		 */
2924 		if (aconnector->dc_sink) {
2925 			amdgpu_dm_update_freesync_caps(connector, NULL);
2926 			dc_sink_release(aconnector->dc_sink);
2927 		}
2928 
2929 		aconnector->dc_sink = sink;
2930 		dc_sink_retain(aconnector->dc_sink);
2931 		if (sink->dc_edid.length == 0) {
2932 			aconnector->edid = NULL;
2933 			if (aconnector->dc_link->aux_mode) {
2934 				drm_dp_cec_unset_edid(
2935 					&aconnector->dm_dp_aux.aux);
2936 			}
2937 		} else {
2938 			aconnector->edid =
2939 				(struct edid *)sink->dc_edid.raw_edid;
2940 
2941 			drm_connector_update_edid_property(connector,
2942 							   aconnector->edid);
2943 			if (aconnector->dc_link->aux_mode)
2944 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2945 						    aconnector->edid);
2946 		}
2947 
2948 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2949 		update_connector_ext_caps(aconnector);
2950 	} else {
2951 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2952 		amdgpu_dm_update_freesync_caps(connector, NULL);
2953 		drm_connector_update_edid_property(connector, NULL);
2954 		aconnector->num_modes = 0;
2955 		dc_sink_release(aconnector->dc_sink);
2956 		aconnector->dc_sink = NULL;
2957 		aconnector->edid = NULL;
2958 #ifdef CONFIG_DRM_AMD_DC_HDCP
2959 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2960 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2961 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2962 #endif
2963 	}
2964 
2965 	mutex_unlock(&dev->mode_config.mutex);
2966 
2967 	update_subconnector_property(aconnector);
2968 
2969 	if (sink)
2970 		dc_sink_release(sink);
2971 }
2972 
2973 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2974 {
2975 	struct drm_connector *connector = &aconnector->base;
2976 	struct drm_device *dev = connector->dev;
2977 	enum dc_connection_type new_connection_type = dc_connection_none;
2978 	struct amdgpu_device *adev = drm_to_adev(dev);
2979 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2980 	struct dm_crtc_state *dm_crtc_state = NULL;
2981 
2982 	if (adev->dm.disable_hpd_irq)
2983 		return;
2984 
2985 	if (dm_con_state->base.state && dm_con_state->base.crtc)
2986 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2987 					dm_con_state->base.state,
2988 					dm_con_state->base.crtc));
2989 	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST handles this in
	 * its own context.
2992 	 */
2993 	mutex_lock(&aconnector->hpd_lock);
2994 
2995 #ifdef CONFIG_DRM_AMD_DC_HDCP
2996 	if (adev->dm.hdcp_workqueue) {
2997 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2998 		dm_con_state->update_hdcp = true;
2999 	}
3000 #endif
3001 	if (aconnector->fake_enable)
3002 		aconnector->fake_enable = false;
3003 
3004 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3005 		DRM_ERROR("KMS: Failed to detect connector\n");
3006 
3007 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3008 		emulated_link_detect(aconnector->dc_link);
3009 
3010 		drm_modeset_lock_all(dev);
3011 		dm_restore_drm_connector_state(dev, connector);
3012 		drm_modeset_unlock_all(dev);
3013 
3014 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3015 			drm_kms_helper_hotplug_event(dev);
3016 
3017 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3018 		if (new_connection_type == dc_connection_none &&
3019 		    aconnector->dc_link->type == dc_connection_none &&
3020 		    dm_crtc_state)
3021 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3022 
3023 		amdgpu_dm_update_connector_after_detect(aconnector);
3024 
3025 		drm_modeset_lock_all(dev);
3026 		dm_restore_drm_connector_state(dev, connector);
3027 		drm_modeset_unlock_all(dev);
3028 
3029 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3030 			drm_kms_helper_hotplug_event(dev);
3031 	}
3032 	mutex_unlock(&aconnector->hpd_lock);
3033 
3034 }
3035 
3036 static void handle_hpd_irq(void *param)
3037 {
3038 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3039 
3040 	handle_hpd_irq_helper(aconnector);
3041 
3042 }
3043 
3044 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3045 {
3046 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3047 	uint8_t dret;
3048 	bool new_irq_handled = false;
3049 	int dpcd_addr;
3050 	int dpcd_bytes_to_read;
3051 
3052 	const int max_process_count = 30;
3053 	int process_count = 0;
3054 
3055 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3056 
3057 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3058 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3059 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3060 		dpcd_addr = DP_SINK_COUNT;
3061 	} else {
3062 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3063 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3064 		dpcd_addr = DP_SINK_COUNT_ESI;
3065 	}
3066 
3067 	dret = drm_dp_dpcd_read(
3068 		&aconnector->dm_dp_aux.aux,
3069 		dpcd_addr,
3070 		esi,
3071 		dpcd_bytes_to_read);
3072 
3073 	while (dret == dpcd_bytes_to_read &&
3074 		process_count < max_process_count) {
3075 		uint8_t retry;
3076 		dret = 0;
3077 
3078 		process_count++;
3079 
3080 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3081 		/* handle HPD short pulse irq */
3082 		if (aconnector->mst_mgr.mst_state)
3083 			drm_dp_mst_hpd_irq(
3084 				&aconnector->mst_mgr,
3085 				esi,
3086 				&new_irq_handled);
3087 
3088 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
3090 			const int ack_dpcd_bytes_to_write =
3091 				dpcd_bytes_to_read - 1;
3092 
3093 			for (retry = 0; retry < 3; retry++) {
3094 				uint8_t wret;
3095 
3096 				wret = drm_dp_dpcd_write(
3097 					&aconnector->dm_dp_aux.aux,
3098 					dpcd_addr + 1,
3099 					&esi[1],
3100 					ack_dpcd_bytes_to_write);
3101 				if (wret == ack_dpcd_bytes_to_write)
3102 					break;
3103 			}
3104 
3105 			/* check if there is new irq to be handled */
3106 			dret = drm_dp_dpcd_read(
3107 				&aconnector->dm_dp_aux.aux,
3108 				dpcd_addr,
3109 				esi,
3110 				dpcd_bytes_to_read);
3111 
3112 			new_irq_handled = false;
3113 		} else {
3114 			break;
3115 		}
3116 	}
3117 
3118 	if (process_count == max_process_count)
3119 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3120 }
3121 
3122 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3123 							union hpd_irq_data hpd_irq_data)
3124 {
3125 	struct hpd_rx_irq_offload_work *offload_work =
3126 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3127 
3128 	if (!offload_work) {
3129 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3130 		return;
3131 	}
3132 
3133 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3134 	offload_work->data = hpd_irq_data;
3135 	offload_work->offload_wq = offload_wq;
3136 
3137 	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queued hpd_rx offload work\n");
3139 }
3140 
3141 static void handle_hpd_rx_irq(void *param)
3142 {
3143 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3144 	struct drm_connector *connector = &aconnector->base;
3145 	struct drm_device *dev = connector->dev;
3146 	struct dc_link *dc_link = aconnector->dc_link;
3147 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3148 	bool result = false;
3149 	enum dc_connection_type new_connection_type = dc_connection_none;
3150 	struct amdgpu_device *adev = drm_to_adev(dev);
3151 	union hpd_irq_data hpd_irq_data;
3152 	bool link_loss = false;
3153 	bool has_left_work = false;
3154 	int idx = aconnector->base.index;
3155 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3156 
3157 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3158 
3159 	if (adev->dm.disable_hpd_irq)
3160 		return;
3161 
3162 	/*
	 * TODO: Temporarily add a mutex to protect the HPD interrupt from a
	 * GPIO conflict; once the i2c helper is implemented, this mutex
	 * should be retired.
3166 	 */
3167 	mutex_lock(&aconnector->hpd_lock);
3168 
3169 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3170 						&link_loss, true, &has_left_work);
3171 
3172 	if (!has_left_work)
3173 		goto out;
3174 
3175 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3176 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3177 		goto out;
3178 	}
3179 
3180 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3181 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3182 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3183 			dm_handle_mst_sideband_msg(aconnector);
3184 			goto out;
3185 		}
3186 
3187 		if (link_loss) {
3188 			bool skip = false;
3189 
3190 			spin_lock(&offload_wq->offload_lock);
3191 			skip = offload_wq->is_handling_link_loss;
3192 
3193 			if (!skip)
3194 				offload_wq->is_handling_link_loss = true;
3195 
3196 			spin_unlock(&offload_wq->offload_lock);
3197 
3198 			if (!skip)
3199 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3200 
3201 			goto out;
3202 		}
3203 	}
3204 
3205 out:
3206 	if (result && !is_mst_root_connector) {
3207 		/* Downstream Port status changed. */
3208 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3209 			DRM_ERROR("KMS: Failed to detect connector\n");
3210 
3211 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3212 			emulated_link_detect(dc_link);
3213 
3214 			if (aconnector->fake_enable)
3215 				aconnector->fake_enable = false;
3216 
3217 			amdgpu_dm_update_connector_after_detect(aconnector);
3218 
3219 
3220 			drm_modeset_lock_all(dev);
3221 			dm_restore_drm_connector_state(dev, connector);
3222 			drm_modeset_unlock_all(dev);
3223 
3224 			drm_kms_helper_hotplug_event(dev);
3225 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3226 
3227 			if (aconnector->fake_enable)
3228 				aconnector->fake_enable = false;
3229 
3230 			amdgpu_dm_update_connector_after_detect(aconnector);
3231 
3232 
3233 			drm_modeset_lock_all(dev);
3234 			dm_restore_drm_connector_state(dev, connector);
3235 			drm_modeset_unlock_all(dev);
3236 
3237 			drm_kms_helper_hotplug_event(dev);
3238 		}
3239 	}
3240 #ifdef CONFIG_DRM_AMD_DC_HDCP
3241 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3242 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3244 	}
3245 #endif
3246 
3247 	if (dc_link->type != dc_connection_mst_branch)
3248 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3249 
3250 	mutex_unlock(&aconnector->hpd_lock);
3251 }
3252 
3253 static void register_hpd_handlers(struct amdgpu_device *adev)
3254 {
3255 	struct drm_device *dev = adev_to_drm(adev);
3256 	struct drm_connector *connector;
3257 	struct amdgpu_dm_connector *aconnector;
3258 	const struct dc_link *dc_link;
3259 	struct dc_interrupt_params int_params = {0};
3260 
3261 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3262 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3263 
3264 	list_for_each_entry(connector,
3265 			&dev->mode_config.connector_list, head)	{
3266 
3267 		aconnector = to_amdgpu_dm_connector(connector);
3268 		dc_link = aconnector->dc_link;
3269 
3270 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3271 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3272 			int_params.irq_source = dc_link->irq_source_hpd;
3273 
3274 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3275 					handle_hpd_irq,
3276 					(void *) aconnector);
3277 		}
3278 
3279 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3280 
3281 			/* Also register for DP short pulse (hpd_rx). */
3282 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
3284 
3285 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3286 					handle_hpd_rx_irq,
3287 					(void *) aconnector);
3288 
3289 			if (adev->dm.hpd_rx_offload_wq)
3290 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3291 					aconnector;
3292 		}
3293 	}
3294 }
3295 
3296 #if defined(CONFIG_DRM_AMD_DC_SI)
3297 /* Register IRQ sources and initialize IRQ callbacks */
3298 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3299 {
3300 	struct dc *dc = adev->dm.dc;
3301 	struct common_irq_params *c_irq_params;
3302 	struct dc_interrupt_params int_params = {0};
3303 	int r;
3304 	int i;
3305 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3306 
3307 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3308 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3309 
3310 	/*
3311 	 * Actions of amdgpu_irq_add_id():
3312 	 * 1. Register a set() function with base driver.
3313 	 *    Base driver will call set() function to enable/disable an
3314 	 *    interrupt in DC hardware.
3315 	 * 2. Register amdgpu_dm_irq_handler().
3316 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3317 	 *    coming from DC hardware.
3318 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3320 
3321 	/* Use VBLANK interrupt */
3322 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3324 		if (r) {
3325 			DRM_ERROR("Failed to add crtc irq id!\n");
3326 			return r;
3327 		}
3328 
3329 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3330 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
3332 
3333 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3334 
3335 		c_irq_params->adev = adev;
3336 		c_irq_params->irq_src = int_params.irq_source;
3337 
3338 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3339 				dm_crtc_high_irq, c_irq_params);
3340 	}
3341 
3342 	/* Use GRPH_PFLIP interrupt */
3343 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3344 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3345 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3346 		if (r) {
3347 			DRM_ERROR("Failed to add page flip irq id!\n");
3348 			return r;
3349 		}
3350 
3351 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3352 		int_params.irq_source =
3353 			dc_interrupt_to_irq_source(dc, i, 0);
3354 
3355 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3356 
3357 		c_irq_params->adev = adev;
3358 		c_irq_params->irq_src = int_params.irq_source;
3359 
3360 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3361 				dm_pflip_high_irq, c_irq_params);
3362 
3363 	}
3364 
3365 	/* HPD */
3366 	r = amdgpu_irq_add_id(adev, client_id,
3367 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3368 	if (r) {
3369 		DRM_ERROR("Failed to add hpd irq id!\n");
3370 		return r;
3371 	}
3372 
3373 	register_hpd_handlers(adev);
3374 
3375 	return 0;
3376 }
3377 #endif
3378 
3379 /* Register IRQ sources and initialize IRQ callbacks */
3380 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3381 {
3382 	struct dc *dc = adev->dm.dc;
3383 	struct common_irq_params *c_irq_params;
3384 	struct dc_interrupt_params int_params = {0};
3385 	int r;
3386 	int i;
3387 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3388 
3389 	if (adev->family >= AMDGPU_FAMILY_AI)
3390 		client_id = SOC15_IH_CLIENTID_DCE;
3391 
3392 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3393 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3394 
3395 	/*
3396 	 * Actions of amdgpu_irq_add_id():
3397 	 * 1. Register a set() function with base driver.
3398 	 *    Base driver will call set() function to enable/disable an
3399 	 *    interrupt in DC hardware.
3400 	 * 2. Register amdgpu_dm_irq_handler().
3401 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3402 	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3405 
3406 	/* Use VBLANK interrupt */
3407 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3408 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3409 		if (r) {
3410 			DRM_ERROR("Failed to add crtc irq id!\n");
3411 			return r;
3412 		}
3413 
3414 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3415 		int_params.irq_source =
3416 			dc_interrupt_to_irq_source(dc, i, 0);
3417 
3418 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3419 
3420 		c_irq_params->adev = adev;
3421 		c_irq_params->irq_src = int_params.irq_source;
3422 
3423 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3424 				dm_crtc_high_irq, c_irq_params);
3425 	}
3426 
3427 	/* Use VUPDATE interrupt */
3428 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3429 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3430 		if (r) {
3431 			DRM_ERROR("Failed to add vupdate irq id!\n");
3432 			return r;
3433 		}
3434 
3435 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3436 		int_params.irq_source =
3437 			dc_interrupt_to_irq_source(dc, i, 0);
3438 
3439 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3440 
3441 		c_irq_params->adev = adev;
3442 		c_irq_params->irq_src = int_params.irq_source;
3443 
3444 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3445 				dm_vupdate_high_irq, c_irq_params);
3446 	}
3447 
3448 	/* Use GRPH_PFLIP interrupt */
3449 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3450 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3451 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3452 		if (r) {
3453 			DRM_ERROR("Failed to add page flip irq id!\n");
3454 			return r;
3455 		}
3456 
3457 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3458 		int_params.irq_source =
3459 			dc_interrupt_to_irq_source(dc, i, 0);
3460 
3461 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3462 
3463 		c_irq_params->adev = adev;
3464 		c_irq_params->irq_src = int_params.irq_source;
3465 
3466 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3467 				dm_pflip_high_irq, c_irq_params);
3468 
3469 	}
3470 
3471 	/* HPD */
3472 	r = amdgpu_irq_add_id(adev, client_id,
3473 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3474 	if (r) {
3475 		DRM_ERROR("Failed to add hpd irq id!\n");
3476 		return r;
3477 	}
3478 
3479 	register_hpd_handlers(adev);
3480 
3481 	return 0;
3482 }
3483 
3484 #if defined(CONFIG_DRM_AMD_DC_DCN)
3485 /* Register IRQ sources and initialize IRQ callbacks */
3486 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3487 {
3488 	struct dc *dc = adev->dm.dc;
3489 	struct common_irq_params *c_irq_params;
3490 	struct dc_interrupt_params int_params = {0};
3491 	int r;
3492 	int i;
3493 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3494 	static const unsigned int vrtl_int_srcid[] = {
3495 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3496 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3497 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3498 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3499 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3500 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3501 	};
3502 #endif
3503 
3504 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3505 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3506 
3507 	/*
3508 	 * Actions of amdgpu_irq_add_id():
3509 	 * 1. Register a set() function with base driver.
3510 	 *    Base driver will call set() function to enable/disable an
3511 	 *    interrupt in DC hardware.
3512 	 * 2. Register amdgpu_dm_irq_handler().
3513 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3514 	 *    coming from DC hardware.
3515 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3516 	 *    for acknowledging and handling.
3517 	 */
3518 
3519 	/* Use VSTARTUP interrupt */
3520 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3521 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3522 			i++) {
3523 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3524 
3525 		if (r) {
3526 			DRM_ERROR("Failed to add crtc irq id!\n");
3527 			return r;
3528 		}
3529 
3530 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3531 		int_params.irq_source =
3532 			dc_interrupt_to_irq_source(dc, i, 0);
3533 
3534 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3535 
3536 		c_irq_params->adev = adev;
3537 		c_irq_params->irq_src = int_params.irq_source;
3538 
3539 		amdgpu_dm_irq_register_interrupt(
3540 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3541 	}
3542 
3543 	/* Use otg vertical line interrupt */
3544 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3545 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3546 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3547 				vrtl_int_srcid[i], &adev->vline0_irq);
3548 
3549 		if (r) {
3550 			DRM_ERROR("Failed to add vline0 irq id!\n");
3551 			return r;
3552 		}
3553 
3554 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3555 		int_params.irq_source =
3556 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3557 
3558 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3559 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3560 			break;
3561 		}
3562 
3563 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3564 					- DC_IRQ_SOURCE_DC1_VLINE0];
3565 
3566 		c_irq_params->adev = adev;
3567 		c_irq_params->irq_src = int_params.irq_source;
3568 
3569 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3570 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3571 	}
3572 #endif
3573 
3574 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3575 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3576 	 * to trigger at end of each vblank, regardless of state of the lock,
3577 	 * matching DCE behaviour.
3578 	 */
3579 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3580 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3581 	     i++) {
3582 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3583 
3584 		if (r) {
3585 			DRM_ERROR("Failed to add vupdate irq id!\n");
3586 			return r;
3587 		}
3588 
3589 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3590 		int_params.irq_source =
3591 			dc_interrupt_to_irq_source(dc, i, 0);
3592 
3593 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3594 
3595 		c_irq_params->adev = adev;
3596 		c_irq_params->irq_src = int_params.irq_source;
3597 
3598 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3599 				dm_vupdate_high_irq, c_irq_params);
3600 	}
3601 
3602 	/* Use GRPH_PFLIP interrupt */
3603 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3604 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3605 			i++) {
3606 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3607 		if (r) {
3608 			DRM_ERROR("Failed to add page flip irq id!\n");
3609 			return r;
3610 		}
3611 
3612 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3613 		int_params.irq_source =
3614 			dc_interrupt_to_irq_source(dc, i, 0);
3615 
3616 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3617 
3618 		c_irq_params->adev = adev;
3619 		c_irq_params->irq_src = int_params.irq_source;
3620 
3621 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3622 				dm_pflip_high_irq, c_irq_params);
3623 
3624 	}
3625 
3626 	/* HPD */
3627 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3628 			&adev->hpd_irq);
3629 	if (r) {
3630 		DRM_ERROR("Failed to add hpd irq id!\n");
3631 		return r;
3632 	}
3633 
3634 	register_hpd_handlers(adev);
3635 
3636 	return 0;
3637 }
3638 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3639 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3640 {
3641 	struct dc *dc = adev->dm.dc;
3642 	struct common_irq_params *c_irq_params;
3643 	struct dc_interrupt_params int_params = {0};
3644 	int r, i;
3645 
3646 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3647 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3648 
3649 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3650 			&adev->dmub_outbox_irq);
3651 	if (r) {
3652 		DRM_ERROR("Failed to add outbox irq id!\n");
3653 		return r;
3654 	}
3655 
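	/* Register the outbox1 low-priority handler only when a DMUB service is available. */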
3656 	if (dc->ctx->dmub_srv) {
3657 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3658 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3659 		int_params.irq_source =
3660 		dc_interrupt_to_irq_source(dc, i, 0);
3661 
3662 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3663 
3664 		c_irq_params->adev = adev;
3665 		c_irq_params->irq_src = int_params.irq_source;
3666 
3667 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3668 				dm_dmub_outbox1_low_irq, c_irq_params);
3669 	}
3670 
3671 	return 0;
3672 }
3673 #endif
3674 
3675 /*
3676  * Acquires the lock for the atomic state object and returns
3677  * the new atomic state.
3678  *
3679  * This should only be called during atomic check.
3680  */
3681 static int dm_atomic_get_state(struct drm_atomic_state *state,
3682 			       struct dm_atomic_state **dm_state)
3683 {
3684 	struct drm_device *dev = state->dev;
3685 	struct amdgpu_device *adev = drm_to_adev(dev);
3686 	struct amdgpu_display_manager *dm = &adev->dm;
3687 	struct drm_private_state *priv_state;
3688 
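	/* Nothing to do if the DM private state has already been obtained for this atomic state. */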
3689 	if (*dm_state)
3690 		return 0;
3691 
3692 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3693 	if (IS_ERR(priv_state))
3694 		return PTR_ERR(priv_state);
3695 
3696 	*dm_state = to_dm_atomic_state(priv_state);
3697 
3698 	return 0;
3699 }
3700 
3701 static struct dm_atomic_state *
3702 dm_atomic_get_new_state(struct drm_atomic_state *state)
3703 {
3704 	struct drm_device *dev = state->dev;
3705 	struct amdgpu_device *adev = drm_to_adev(dev);
3706 	struct amdgpu_display_manager *dm = &adev->dm;
3707 	struct drm_private_obj *obj;
3708 	struct drm_private_state *new_obj_state;
3709 	int i;
3710 
3711 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3712 		if (obj->funcs == dm->atomic_obj.funcs)
3713 			return to_dm_atomic_state(new_obj_state);
3714 	}
3715 
3716 	return NULL;
3717 }
3718 
3719 static struct drm_private_state *
3720 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3721 {
3722 	struct dm_atomic_state *old_state, *new_state;
3723 
3724 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3725 	if (!new_state)
3726 		return NULL;
3727 
3728 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3729 
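	/* The helper above only duplicates the DRM base private state; the DC context is copied separately below. */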
3730 	old_state = to_dm_atomic_state(obj->state);
3731 
3732 	if (old_state && old_state->context)
3733 		new_state->context = dc_copy_state(old_state->context);
3734 
3735 	if (!new_state->context) {
3736 		kfree(new_state);
3737 		return NULL;
3738 	}
3739 
3740 	return &new_state->base;
3741 }
3742 
3743 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3744 				    struct drm_private_state *state)
3745 {
3746 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3747 
3748 	if (dm_state && dm_state->context)
3749 		dc_release_state(dm_state->context);
3750 
3751 	kfree(dm_state);
3752 }
3753 
3754 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3755 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3756 	.atomic_destroy_state = dm_atomic_destroy_state,
3757 };
3758 
3759 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3760 {
3761 	struct dm_atomic_state *state;
3762 	int r;
3763 
3764 	adev->mode_info.mode_config_initialized = true;
3765 
3766 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3767 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3768 
3769 	adev_to_drm(adev)->mode_config.max_width = 16384;
3770 	adev_to_drm(adev)->mode_config.max_height = 16384;
3771 
3772 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3773 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3774 	/* indicates support for immediate flip */
3775 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3776 
3777 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3778 
3779 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3780 	if (!state)
3781 		return -ENOMEM;
3782 
3783 	state->context = dc_create_state(adev->dm.dc);
3784 	if (!state->context) {
3785 		kfree(state);
3786 		return -ENOMEM;
3787 	}
3788 
3789 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3790 
3791 	drm_atomic_private_obj_init(adev_to_drm(adev),
3792 				    &adev->dm.atomic_obj,
3793 				    &state->base,
3794 				    &dm_atomic_state_funcs);
3795 
3796 	r = amdgpu_display_modeset_create_props(adev);
3797 	if (r) {
3798 		dc_release_state(state->context);
3799 		kfree(state);
3800 		return r;
3801 	}
3802 
3803 	r = amdgpu_dm_audio_init(adev);
3804 	if (r) {
3805 		dc_release_state(state->context);
3806 		kfree(state);
3807 		return r;
3808 	}
3809 
3810 	return 0;
3811 }
3812 
3813 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3814 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3815 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3816 
3817 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3818 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3819 
3820 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3821 					    int bl_idx)
3822 {
3823 #if defined(CONFIG_ACPI)
3824 	struct amdgpu_dm_backlight_caps caps;
3825 
3826 	memset(&caps, 0, sizeof(caps));
3827 
3828 	if (dm->backlight_caps[bl_idx].caps_valid)
3829 		return;
3830 
3831 	amdgpu_acpi_get_backlight_caps(&caps);
3832 	if (caps.caps_valid) {
3833 		dm->backlight_caps[bl_idx].caps_valid = true;
3834 		if (caps.aux_support)
3835 			return;
3836 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3837 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3838 	} else {
3839 		dm->backlight_caps[bl_idx].min_input_signal =
3840 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3841 		dm->backlight_caps[bl_idx].max_input_signal =
3842 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3843 	}
3844 #else
3845 	if (dm->backlight_caps[bl_idx].aux_support)
3846 		return;
3847 
3848 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3849 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3850 #endif
3851 }
3852 
3853 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3854 				unsigned *min, unsigned *max)
3855 {
3856 	if (!caps)
3857 		return 0;
3858 
3859 	if (caps->aux_support) {
3860 		// Firmware limits are in nits, DC API wants millinits.
3861 		*max = 1000 * caps->aux_max_input_signal;
3862 		*min = 1000 * caps->aux_min_input_signal;
3863 	} else {
3864 		// Firmware limits are 8-bit, PWM control is 16-bit.
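		// Multiplying by 0x101 maps 0..0xff onto 0..0xffff exactly (0xff * 0x101 == 0xffff).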
3865 		*max = 0x101 * caps->max_input_signal;
3866 		*min = 0x101 * caps->min_input_signal;
3867 	}
3868 	return 1;
3869 }
3870 
3871 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3872 					uint32_t brightness)
3873 {
3874 	unsigned min, max;
3875 
3876 	if (!get_brightness_range(caps, &min, &max))
3877 		return brightness;
3878 
3879 	// Rescale 0..255 to min..max
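	// i.e. level = min + round((max - min) * brightness / AMDGPU_MAX_BL_LEVEL), with AMDGPU_MAX_BL_LEVEL == 255.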
3880 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3881 				       AMDGPU_MAX_BL_LEVEL);
3882 }
3883 
3884 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3885 				      uint32_t brightness)
3886 {
3887 	unsigned min, max;
3888 
3889 	if (!get_brightness_range(caps, &min, &max))
3890 		return brightness;
3891 
3892 	if (brightness < min)
3893 		return 0;
3894 	// Rescale min..max to 0..255
3895 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3896 				 max - min);
3897 }
3898 
3899 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3900 					 int bl_idx,
3901 					 u32 user_brightness)
3902 {
3903 	struct amdgpu_dm_backlight_caps caps;
3904 	struct dc_link *link;
3905 	u32 brightness;
3906 	bool rc;
3907 
3908 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3909 	caps = dm->backlight_caps[bl_idx];
3910 
3911 	dm->brightness[bl_idx] = user_brightness;
3912 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3913 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3914 
3915 	/* Change brightness based on AUX property */
3916 	if (caps.aux_support) {
3917 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3918 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3919 		if (!rc)
3920 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3921 	} else {
3922 		rc = dc_link_set_backlight_level(link, brightness, 0);
3923 		if (!rc)
3924 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3925 	}
3926 
3927 	return rc ? 0 : 1;
3928 }
3929 
3930 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3931 {
3932 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3933 	int i;
3934 
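	/* Find which registered eDP backlight device this is; fall back to the first panel if no match is found. */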
3935 	for (i = 0; i < dm->num_of_edps; i++) {
3936 		if (bd == dm->backlight_dev[i])
3937 			break;
3938 	}
3939 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3940 		i = 0;
3941 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3942 
3943 	return 0;
3944 }
3945 
3946 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3947 					 int bl_idx)
3948 {
3949 	struct amdgpu_dm_backlight_caps caps;
3950 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3951 
3952 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3953 	caps = dm->backlight_caps[bl_idx];
3954 
3955 	if (caps.aux_support) {
3956 		u32 avg, peak;
3957 		bool rc;
3958 
3959 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3960 		if (!rc)
3961 			return dm->brightness[bl_idx];
3962 		return convert_brightness_to_user(&caps, avg);
3963 	} else {
3964 		int ret = dc_link_get_backlight_level(link);
3965 
3966 		if (ret == DC_ERROR_UNEXPECTED)
3967 			return dm->brightness[bl_idx];
3968 		return convert_brightness_to_user(&caps, ret);
3969 	}
3970 }
3971 
3972 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3973 {
3974 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3975 	int i;
3976 
3977 	for (i = 0; i < dm->num_of_edps; i++) {
3978 		if (bd == dm->backlight_dev[i])
3979 			break;
3980 	}
3981 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3982 		i = 0;
3983 	return amdgpu_dm_backlight_get_level(dm, i);
3984 }
3985 
3986 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3987 	.options = BL_CORE_SUSPENDRESUME,
3988 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3989 	.update_status	= amdgpu_dm_backlight_update_status,
3990 };
3991 
3992 static void
3993 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3994 {
3995 	char bl_name[16];
3996 	struct backlight_properties props = { 0 };
3997 
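	/* dm->num_of_edps is the next free backlight slot; its caller (register_backlight_device) bumps it on success. */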
3998 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3999 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4000 
4001 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4002 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4003 	props.type = BACKLIGHT_RAW;
4004 
4005 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4006 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4007 
4008 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4009 								       adev_to_drm(dm->adev)->dev,
4010 								       dm,
4011 								       &amdgpu_dm_backlight_ops,
4012 								       &props);
4013 
4014 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4015 		DRM_ERROR("DM: Backlight registration failed!\n");
4016 	else
4017 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4018 }
4019 #endif
4020 
4021 static int initialize_plane(struct amdgpu_display_manager *dm,
4022 			    struct amdgpu_mode_info *mode_info, int plane_id,
4023 			    enum drm_plane_type plane_type,
4024 			    const struct dc_plane_cap *plane_cap)
4025 {
4026 	struct drm_plane *plane;
4027 	unsigned long possible_crtcs;
4028 	int ret = 0;
4029 
4030 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4031 	if (!plane) {
4032 		DRM_ERROR("KMS: Failed to allocate plane\n");
4033 		return -ENOMEM;
4034 	}
4035 	plane->type = plane_type;
4036 
	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC on planes that will not be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
4043 	possible_crtcs = 1 << plane_id;
4044 	if (plane_id >= dm->dc->caps.max_streams)
4045 		possible_crtcs = 0xff;
4046 
4047 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4048 
4049 	if (ret) {
4050 		DRM_ERROR("KMS: Failed to initialize plane\n");
4051 		kfree(plane);
4052 		return ret;
4053 	}
4054 
4055 	if (mode_info)
4056 		mode_info->planes[plane_id] = plane;
4057 
4058 	return ret;
4059 }
4060 
4061 
4062 static void register_backlight_device(struct amdgpu_display_manager *dm,
4063 				      struct dc_link *link)
4064 {
4065 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4066 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4067 
4068 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4069 	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
4075 		if (!dm->backlight_dev[dm->num_of_edps])
4076 			amdgpu_dm_register_backlight_device(dm);
4077 
4078 		if (dm->backlight_dev[dm->num_of_edps]) {
4079 			dm->backlight_link[dm->num_of_edps] = link;
4080 			dm->num_of_edps++;
4081 		}
4082 	}
4083 #endif
4084 }
4085 
4086 
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
4095 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4096 {
4097 	struct amdgpu_display_manager *dm = &adev->dm;
4098 	int32_t i;
4099 	struct amdgpu_dm_connector *aconnector = NULL;
4100 	struct amdgpu_encoder *aencoder = NULL;
4101 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4102 	uint32_t link_cnt;
4103 	int32_t primary_planes;
4104 	enum dc_connection_type new_connection_type = dc_connection_none;
4105 	const struct dc_plane_cap *plane;
4106 	bool psr_feature_enabled = false;
4107 
4108 	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs in use */
4110 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4111 
4112 	link_cnt = dm->dc->caps.max_links;
4113 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4114 		DRM_ERROR("DM: Failed to initialize mode config\n");
4115 		return -EINVAL;
4116 	}
4117 
4118 	/* There is one primary plane per CRTC */
4119 	primary_planes = dm->dc->caps.max_streams;
4120 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4121 
4122 	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4124 	 * Order is reversed to match iteration order in atomic check.
4125 	 */
4126 	for (i = (primary_planes - 1); i >= 0; i--) {
4127 		plane = &dm->dc->caps.planes[i];
4128 
4129 		if (initialize_plane(dm, mode_info, i,
4130 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4131 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4132 			goto fail;
4133 		}
4134 	}
4135 
4136 	/*
4137 	 * Initialize overlay planes, index starting after primary planes.
4138 	 * These planes have a higher DRM index than the primary planes since
4139 	 * they should be considered as having a higher z-order.
4140 	 * Order is reversed to match iteration order in atomic check.
4141 	 *
4142 	 * Only support DCN for now, and only expose one so we don't encourage
4143 	 * userspace to use up all the pipes.
4144 	 */
4145 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4146 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4147 
4148 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4149 			continue;
4150 
4151 		if (!plane->blends_with_above || !plane->blends_with_below)
4152 			continue;
4153 
4154 		if (!plane->pixel_format_support.argb8888)
4155 			continue;
4156 
4157 		if (initialize_plane(dm, NULL, primary_planes + i,
4158 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4159 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4160 			goto fail;
4161 		}
4162 
4163 		/* Only create one overlay plane. */
4164 		break;
4165 	}
4166 
4167 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4168 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4169 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4170 			goto fail;
4171 		}
4172 
4173 #if defined(CONFIG_DRM_AMD_DC_DCN)
4174 	/* Use Outbox interrupt */
4175 	switch (adev->ip_versions[DCE_HWIP][0]) {
4176 	case IP_VERSION(3, 0, 0):
4177 	case IP_VERSION(3, 1, 2):
4178 	case IP_VERSION(3, 1, 3):
4179 	case IP_VERSION(2, 1, 0):
4180 		if (register_outbox_irq_handlers(dm->adev)) {
4181 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4182 			goto fail;
4183 		}
4184 		break;
4185 	default:
4186 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4187 			      adev->ip_versions[DCE_HWIP][0]);
4188 	}
4189 
4190 	/* Determine whether to enable PSR support by default. */
4191 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4192 		switch (adev->ip_versions[DCE_HWIP][0]) {
4193 		case IP_VERSION(3, 1, 2):
4194 		case IP_VERSION(3, 1, 3):
4195 			psr_feature_enabled = true;
4196 			break;
4197 		default:
4198 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4199 			break;
4200 		}
4201 	}
4202 #endif
4203 
	/* Loop over all connectors on the board */
4205 	for (i = 0; i < link_cnt; i++) {
4206 		struct dc_link *link = NULL;
4207 
4208 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4209 			DRM_ERROR(
4210 				"KMS: Cannot support more than %d display indexes\n",
4211 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4212 			continue;
4213 		}
4214 
4215 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4216 		if (!aconnector)
4217 			goto fail;
4218 
4219 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4220 		if (!aencoder)
4221 			goto fail;
4222 
4223 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4224 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4225 			goto fail;
4226 		}
4227 
4228 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4229 			DRM_ERROR("KMS: Failed to initialize connector\n");
4230 			goto fail;
4231 		}
4232 
4233 		link = dc_get_link_at_index(dm->dc, i);
4234 
4235 		if (!dc_link_detect_sink(link, &new_connection_type))
4236 			DRM_ERROR("KMS: Failed to detect connector\n");
4237 
4238 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4239 			emulated_link_detect(link);
4240 			amdgpu_dm_update_connector_after_detect(aconnector);
4241 
4242 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4243 			amdgpu_dm_update_connector_after_detect(aconnector);
4244 			register_backlight_device(dm, link);
4245 			if (dm->num_of_edps)
4246 				update_connector_ext_caps(aconnector);
4247 			if (psr_feature_enabled)
4248 				amdgpu_dm_set_psr_caps(link);
4249 		}
4250 
4251 
4252 	}
4253 
4254 	/* Software is initialized. Now we can register interrupt handlers. */
4255 	switch (adev->asic_type) {
4256 #if defined(CONFIG_DRM_AMD_DC_SI)
4257 	case CHIP_TAHITI:
4258 	case CHIP_PITCAIRN:
4259 	case CHIP_VERDE:
4260 	case CHIP_OLAND:
4261 		if (dce60_register_irq_handlers(dm->adev)) {
4262 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4263 			goto fail;
4264 		}
4265 		break;
4266 #endif
4267 	case CHIP_BONAIRE:
4268 	case CHIP_HAWAII:
4269 	case CHIP_KAVERI:
4270 	case CHIP_KABINI:
4271 	case CHIP_MULLINS:
4272 	case CHIP_TONGA:
4273 	case CHIP_FIJI:
4274 	case CHIP_CARRIZO:
4275 	case CHIP_STONEY:
4276 	case CHIP_POLARIS11:
4277 	case CHIP_POLARIS10:
4278 	case CHIP_POLARIS12:
4279 	case CHIP_VEGAM:
4280 	case CHIP_VEGA10:
4281 	case CHIP_VEGA12:
4282 	case CHIP_VEGA20:
4283 		if (dce110_register_irq_handlers(dm->adev)) {
4284 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4285 			goto fail;
4286 		}
4287 		break;
4288 	default:
4289 #if defined(CONFIG_DRM_AMD_DC_DCN)
4290 		switch (adev->ip_versions[DCE_HWIP][0]) {
4291 		case IP_VERSION(1, 0, 0):
4292 		case IP_VERSION(1, 0, 1):
4293 		case IP_VERSION(2, 0, 2):
4294 		case IP_VERSION(2, 0, 3):
4295 		case IP_VERSION(2, 0, 0):
4296 		case IP_VERSION(2, 1, 0):
4297 		case IP_VERSION(3, 0, 0):
4298 		case IP_VERSION(3, 0, 2):
4299 		case IP_VERSION(3, 0, 3):
4300 		case IP_VERSION(3, 0, 1):
4301 		case IP_VERSION(3, 1, 2):
4302 		case IP_VERSION(3, 1, 3):
4303 			if (dcn10_register_irq_handlers(dm->adev)) {
4304 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4305 				goto fail;
4306 			}
4307 			break;
4308 		default:
4309 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4310 					adev->ip_versions[DCE_HWIP][0]);
4311 			goto fail;
4312 		}
4313 #endif
4314 		break;
4315 	}
4316 
4317 	return 0;
4318 fail:
4319 	kfree(aencoder);
4320 	kfree(aconnector);
4321 
4322 	return -EINVAL;
4323 }
4324 
4325 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4326 {
4327 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4329 }
4330 
4331 /******************************************************************************
4332  * amdgpu_display_funcs functions
4333  *****************************************************************************/
4334 
4335 /*
4336  * dm_bandwidth_update - program display watermarks
4337  *
4338  * @adev: amdgpu_device pointer
4339  *
4340  * Calculate and program the display watermarks and line buffer allocation.
4341  */
4342 static void dm_bandwidth_update(struct amdgpu_device *adev)
4343 {
4344 	/* TODO: implement later */
4345 }
4346 
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
4360 
4361 #if defined(CONFIG_DEBUG_KERNEL_DC)
4362 
4363 static ssize_t s3_debug_store(struct device *device,
4364 			      struct device_attribute *attr,
4365 			      const char *buf,
4366 			      size_t count)
4367 {
4368 	int ret;
4369 	int s3_state;
4370 	struct drm_device *drm_dev = dev_get_drvdata(device);
4371 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4372 
4373 	ret = kstrtoint(buf, 0, &s3_state);
4374 
4375 	if (ret == 0) {
4376 		if (s3_state) {
4377 			dm_resume(adev);
4378 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4379 		} else
4380 			dm_suspend(adev);
4381 	}
4382 
4383 	return ret == 0 ? count : 0;
4384 }
4385 
4386 DEVICE_ATTR_WO(s3_debug);
4387 
4388 #endif
4389 
4390 static int dm_early_init(void *handle)
4391 {
4392 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4393 
4394 	switch (adev->asic_type) {
4395 #if defined(CONFIG_DRM_AMD_DC_SI)
4396 	case CHIP_TAHITI:
4397 	case CHIP_PITCAIRN:
4398 	case CHIP_VERDE:
4399 		adev->mode_info.num_crtc = 6;
4400 		adev->mode_info.num_hpd = 6;
4401 		adev->mode_info.num_dig = 6;
4402 		break;
4403 	case CHIP_OLAND:
4404 		adev->mode_info.num_crtc = 2;
4405 		adev->mode_info.num_hpd = 2;
4406 		adev->mode_info.num_dig = 2;
4407 		break;
4408 #endif
4409 	case CHIP_BONAIRE:
4410 	case CHIP_HAWAII:
4411 		adev->mode_info.num_crtc = 6;
4412 		adev->mode_info.num_hpd = 6;
4413 		adev->mode_info.num_dig = 6;
4414 		break;
4415 	case CHIP_KAVERI:
4416 		adev->mode_info.num_crtc = 4;
4417 		adev->mode_info.num_hpd = 6;
4418 		adev->mode_info.num_dig = 7;
4419 		break;
4420 	case CHIP_KABINI:
4421 	case CHIP_MULLINS:
4422 		adev->mode_info.num_crtc = 2;
4423 		adev->mode_info.num_hpd = 6;
4424 		adev->mode_info.num_dig = 6;
4425 		break;
4426 	case CHIP_FIJI:
4427 	case CHIP_TONGA:
4428 		adev->mode_info.num_crtc = 6;
4429 		adev->mode_info.num_hpd = 6;
4430 		adev->mode_info.num_dig = 7;
4431 		break;
4432 	case CHIP_CARRIZO:
4433 		adev->mode_info.num_crtc = 3;
4434 		adev->mode_info.num_hpd = 6;
4435 		adev->mode_info.num_dig = 9;
4436 		break;
4437 	case CHIP_STONEY:
4438 		adev->mode_info.num_crtc = 2;
4439 		adev->mode_info.num_hpd = 6;
4440 		adev->mode_info.num_dig = 9;
4441 		break;
4442 	case CHIP_POLARIS11:
4443 	case CHIP_POLARIS12:
4444 		adev->mode_info.num_crtc = 5;
4445 		adev->mode_info.num_hpd = 5;
4446 		adev->mode_info.num_dig = 5;
4447 		break;
4448 	case CHIP_POLARIS10:
4449 	case CHIP_VEGAM:
4450 		adev->mode_info.num_crtc = 6;
4451 		adev->mode_info.num_hpd = 6;
4452 		adev->mode_info.num_dig = 6;
4453 		break;
4454 	case CHIP_VEGA10:
4455 	case CHIP_VEGA12:
4456 	case CHIP_VEGA20:
4457 		adev->mode_info.num_crtc = 6;
4458 		adev->mode_info.num_hpd = 6;
4459 		adev->mode_info.num_dig = 6;
4460 		break;
4461 	default:
4462 #if defined(CONFIG_DRM_AMD_DC_DCN)
4463 		switch (adev->ip_versions[DCE_HWIP][0]) {
4464 		case IP_VERSION(2, 0, 2):
4465 		case IP_VERSION(3, 0, 0):
4466 			adev->mode_info.num_crtc = 6;
4467 			adev->mode_info.num_hpd = 6;
4468 			adev->mode_info.num_dig = 6;
4469 			break;
4470 		case IP_VERSION(2, 0, 0):
4471 		case IP_VERSION(3, 0, 2):
4472 			adev->mode_info.num_crtc = 5;
4473 			adev->mode_info.num_hpd = 5;
4474 			adev->mode_info.num_dig = 5;
4475 			break;
4476 		case IP_VERSION(2, 0, 3):
4477 		case IP_VERSION(3, 0, 3):
4478 			adev->mode_info.num_crtc = 2;
4479 			adev->mode_info.num_hpd = 2;
4480 			adev->mode_info.num_dig = 2;
4481 			break;
4482 		case IP_VERSION(1, 0, 0):
4483 		case IP_VERSION(1, 0, 1):
4484 		case IP_VERSION(3, 0, 1):
4485 		case IP_VERSION(2, 1, 0):
4486 		case IP_VERSION(3, 1, 2):
4487 		case IP_VERSION(3, 1, 3):
4488 			adev->mode_info.num_crtc = 4;
4489 			adev->mode_info.num_hpd = 4;
4490 			adev->mode_info.num_dig = 4;
4491 			break;
4492 		default:
4493 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4494 					adev->ip_versions[DCE_HWIP][0]);
4495 			return -EINVAL;
4496 		}
4497 #endif
4498 		break;
4499 	}
4500 
4501 	amdgpu_dm_set_irq_funcs(adev);
4502 
4503 	if (adev->mode_info.funcs == NULL)
4504 		adev->mode_info.funcs = &dm_display_funcs;
4505 
4506 	/*
4507 	 * Note: Do NOT change adev->audio_endpt_rreg and
4508 	 * adev->audio_endpt_wreg because they are initialised in
4509 	 * amdgpu_device_init()
4510 	 */
4511 #if defined(CONFIG_DEBUG_KERNEL_DC)
4512 	device_create_file(
4513 		adev_to_drm(adev)->dev,
4514 		&dev_attr_s3_debug);
4515 #endif
4516 
4517 	return 0;
4518 }
4519 
4520 static bool modeset_required(struct drm_crtc_state *crtc_state,
4521 			     struct dc_stream_state *new_stream,
4522 			     struct dc_stream_state *old_stream)
4523 {
4524 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4525 }
4526 
4527 static bool modereset_required(struct drm_crtc_state *crtc_state)
4528 {
4529 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4530 }
4531 
4532 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4533 {
4534 	drm_encoder_cleanup(encoder);
4535 	kfree(encoder);
4536 }
4537 
4538 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4539 	.destroy = amdgpu_dm_encoder_destroy,
4540 };
4541 
4542 
4543 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4544 					 struct drm_framebuffer *fb,
4545 					 int *min_downscale, int *max_upscale)
4546 {
4547 	struct amdgpu_device *adev = drm_to_adev(dev);
4548 	struct dc *dc = adev->dm.dc;
4549 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4550 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4551 
4552 	switch (fb->format->format) {
4553 	case DRM_FORMAT_P010:
4554 	case DRM_FORMAT_NV12:
4555 	case DRM_FORMAT_NV21:
4556 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4557 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4558 		break;
4559 
4560 	case DRM_FORMAT_XRGB16161616F:
4561 	case DRM_FORMAT_ARGB16161616F:
4562 	case DRM_FORMAT_XBGR16161616F:
4563 	case DRM_FORMAT_ABGR16161616F:
4564 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4565 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4566 		break;
4567 
4568 	default:
4569 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4570 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4571 		break;
4572 	}
4573 
4574 	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4576 	 * scaling factor of 1.0 == 1000 units.
4577 	 */
4578 	if (*max_upscale == 1)
4579 		*max_upscale = 1000;
4580 
4581 	if (*min_downscale == 1)
4582 		*min_downscale = 1000;
4583 }
4584 
4585 
4586 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4587 				const struct drm_plane_state *state,
4588 				struct dc_scaling_info *scaling_info)
4589 {
4590 	int scale_w, scale_h, min_downscale, max_upscale;
4591 
4592 	memset(scaling_info, 0, sizeof(*scaling_info));
4593 
4594 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4595 	scaling_info->src_rect.x = state->src_x >> 16;
4596 	scaling_info->src_rect.y = state->src_y >> 16;
4597 
4598 	/*
4599 	 * For reasons we don't (yet) fully understand a non-zero
4600 	 * src_y coordinate into an NV12 buffer can cause a
4601 	 * system hang on DCN1x.
4602 	 * To avoid hangs (and maybe be overly cautious)
4603 	 * let's reject both non-zero src_x and src_y.
4604 	 *
4605 	 * We currently know of only one use-case to reproduce a
4606 	 * scenario with non-zero src_x and src_y for NV12, which
4607 	 * is to gesture the YouTube Android app into full screen
4608 	 * on ChromeOS.
4609 	 */
4610 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4611 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4612 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4613 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4614 		return -EINVAL;
4615 
4616 	scaling_info->src_rect.width = state->src_w >> 16;
4617 	if (scaling_info->src_rect.width == 0)
4618 		return -EINVAL;
4619 
4620 	scaling_info->src_rect.height = state->src_h >> 16;
4621 	if (scaling_info->src_rect.height == 0)
4622 		return -EINVAL;
4623 
4624 	scaling_info->dst_rect.x = state->crtc_x;
4625 	scaling_info->dst_rect.y = state->crtc_y;
4626 
4627 	if (state->crtc_w == 0)
4628 		return -EINVAL;
4629 
4630 	scaling_info->dst_rect.width = state->crtc_w;
4631 
4632 	if (state->crtc_h == 0)
4633 		return -EINVAL;
4634 
4635 	scaling_info->dst_rect.height = state->crtc_h;
4636 
4637 	/* DRM doesn't specify clipping on destination output. */
4638 	scaling_info->clip_rect = scaling_info->dst_rect;
4639 
4640 	/* Validate scaling per-format with DC plane caps */
4641 	if (state->plane && state->plane->dev && state->fb) {
4642 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4643 					     &min_downscale, &max_upscale);
4644 	} else {
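		/* No plane caps available: fall back to a 1/4 minimum downscale and 16x maximum upscale (1000 == 1.0). */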
4645 		min_downscale = 250;
4646 		max_upscale = 16000;
4647 	}
4648 
4649 	scale_w = scaling_info->dst_rect.width * 1000 /
4650 		  scaling_info->src_rect.width;
4651 
4652 	if (scale_w < min_downscale || scale_w > max_upscale)
4653 		return -EINVAL;
4654 
4655 	scale_h = scaling_info->dst_rect.height * 1000 /
4656 		  scaling_info->src_rect.height;
4657 
4658 	if (scale_h < min_downscale || scale_h > max_upscale)
4659 		return -EINVAL;
4660 
4661 	/*
4662 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4663 	 * assume reasonable defaults based on the format.
4664 	 */
4665 
4666 	return 0;
4667 }
4668 
4669 static void
4670 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4671 				 uint64_t tiling_flags)
4672 {
4673 	/* Fill GFX8 params */
4674 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4675 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4676 
4677 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4678 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4679 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4680 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4681 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4682 
4683 		/* XXX fix me for VI */
4684 		tiling_info->gfx8.num_banks = num_banks;
4685 		tiling_info->gfx8.array_mode =
4686 				DC_ARRAY_2D_TILED_THIN1;
4687 		tiling_info->gfx8.tile_split = tile_split;
4688 		tiling_info->gfx8.bank_width = bankw;
4689 		tiling_info->gfx8.bank_height = bankh;
4690 		tiling_info->gfx8.tile_aspect = mtaspect;
4691 		tiling_info->gfx8.tile_mode =
4692 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4693 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4694 			== DC_ARRAY_1D_TILED_THIN1) {
4695 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4696 	}
4697 
4698 	tiling_info->gfx8.pipe_config =
4699 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4700 }
4701 
4702 static void
4703 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4704 				  union dc_tiling_info *tiling_info)
4705 {
4706 	tiling_info->gfx9.num_pipes =
4707 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4708 	tiling_info->gfx9.num_banks =
4709 		adev->gfx.config.gb_addr_config_fields.num_banks;
4710 	tiling_info->gfx9.pipe_interleave =
4711 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4712 	tiling_info->gfx9.num_shader_engines =
4713 		adev->gfx.config.gb_addr_config_fields.num_se;
4714 	tiling_info->gfx9.max_compressed_frags =
4715 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4716 	tiling_info->gfx9.num_rb_per_se =
4717 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4718 	tiling_info->gfx9.shaderEnable = 1;
4719 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4720 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4721 }
4722 
4723 static int
4724 validate_dcc(struct amdgpu_device *adev,
4725 	     const enum surface_pixel_format format,
4726 	     const enum dc_rotation_angle rotation,
4727 	     const union dc_tiling_info *tiling_info,
4728 	     const struct dc_plane_dcc_param *dcc,
4729 	     const struct dc_plane_address *address,
4730 	     const struct plane_size *plane_size)
4731 {
4732 	struct dc *dc = adev->dm.dc;
4733 	struct dc_dcc_surface_param input;
4734 	struct dc_surface_dcc_cap output;
4735 
4736 	memset(&input, 0, sizeof(input));
4737 	memset(&output, 0, sizeof(output));
4738 
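	/* Nothing to validate when DCC is not enabled for this plane. */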
4739 	if (!dcc->enable)
4740 		return 0;
4741 
4742 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4743 	    !dc->cap_funcs.get_dcc_compression_cap)
4744 		return -EINVAL;
4745 
4746 	input.format = format;
4747 	input.surface_size.width = plane_size->surface_size.width;
4748 	input.surface_size.height = plane_size->surface_size.height;
4749 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4750 
4751 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4752 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4753 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4754 		input.scan = SCAN_DIRECTION_VERTICAL;
4755 
4756 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4757 		return -EINVAL;
4758 
4759 	if (!output.capable)
4760 		return -EINVAL;
4761 
4762 	if (dcc->independent_64b_blks == 0 &&
4763 	    output.grph.rgb.independent_64b_blks != 0)
4764 		return -EINVAL;
4765 
4766 	return 0;
4767 }
4768 
4769 static bool
4770 modifier_has_dcc(uint64_t modifier)
4771 {
4772 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4773 }
4774 
4775 static unsigned
4776 modifier_gfx9_swizzle_mode(uint64_t modifier)
4777 {
4778 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4779 		return 0;
4780 
4781 	return AMD_FMT_MOD_GET(TILE, modifier);
4782 }
4783 
4784 static const struct drm_format_info *
4785 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4786 {
4787 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4788 }
4789 
4790 static void
4791 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4792 				    union dc_tiling_info *tiling_info,
4793 				    uint64_t modifier)
4794 {
4795 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4796 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4797 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4798 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4799 
4800 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4801 
4802 	if (!IS_AMD_FMT_MOD(modifier))
4803 		return;
4804 
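	/* Override the device defaults with the pipe/SE layout encoded in the modifier. */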
4805 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4806 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4807 
4808 	if (adev->family >= AMDGPU_FAMILY_NV) {
4809 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4810 	} else {
4811 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4812 
4813 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4814 	}
4815 }
4816 
4817 enum dm_micro_swizzle {
4818 	MICRO_SWIZZLE_Z = 0,
4819 	MICRO_SWIZZLE_S = 1,
4820 	MICRO_SWIZZLE_D = 2,
4821 	MICRO_SWIZZLE_R = 3
4822 };
4823 
4824 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4825 					  uint32_t format,
4826 					  uint64_t modifier)
4827 {
4828 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4829 	const struct drm_format_info *info = drm_format_info(format);
4830 	int i;
4831 
4832 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4833 
4834 	if (!info)
4835 		return false;
4836 
4837 	/*
4838 	 * We always have to allow these modifiers:
4839 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4840 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4841 	 */
4842 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4843 	    modifier == DRM_FORMAT_MOD_INVALID) {
4844 		return true;
4845 	}
4846 
4847 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4848 	for (i = 0; i < plane->modifier_count; i++) {
4849 		if (modifier == plane->modifiers[i])
4850 			break;
4851 	}
4852 	if (i == plane->modifier_count)
4853 		return false;
4854 
4855 	/*
4856 	 * For D swizzle the canonical modifier depends on the bpp, so check
4857 	 * it here.
4858 	 */
4859 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4860 	    adev->family >= AMDGPU_FAMILY_NV) {
4861 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4862 			return false;
4863 	}
4864 
4865 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4866 	    info->cpp[0] < 8)
4867 		return false;
4868 
4869 	if (modifier_has_dcc(modifier)) {
4870 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4871 		if (info->cpp[0] != 4)
4872 			return false;
4873 		/* We support multi-planar formats, but not when combined with
4874 		 * additional DCC metadata planes. */
4875 		if (info->num_planes > 1)
4876 			return false;
4877 	}
4878 
4879 	return true;
4880 }
4881 
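/*
 * Append one modifier to the caller's list, doubling the capacity when full.
 * On allocation failure the list is freed and NULLed so that subsequent calls
 * become no-ops and the caller ends up with no modifiers.
 */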
4882 static void
4883 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4884 {
4885 	if (!*mods)
4886 		return;
4887 
4888 	if (*cap - *size < 1) {
4889 		uint64_t new_cap = *cap * 2;
4890 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4891 
4892 		if (!new_mods) {
4893 			kfree(*mods);
4894 			*mods = NULL;
4895 			return;
4896 		}
4897 
4898 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4899 		kfree(*mods);
4900 		*mods = new_mods;
4901 		*cap = new_cap;
4902 	}
4903 
4904 	(*mods)[*size] = mod;
4905 	*size += 1;
4906 }
4907 
4908 static void
4909 add_gfx9_modifiers(const struct amdgpu_device *adev,
4910 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4911 {
4912 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4913 	int pipe_xor_bits = min(8, pipes +
4914 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4915 	int bank_xor_bits = min(8 - pipe_xor_bits,
4916 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4917 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4918 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
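	/* pipe_xor_bits and bank_xor_bits together are capped at 8 bits here. */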
4919 
4920 
4921 	if (adev->family == AMDGPU_FAMILY_RV) {
4922 		/* Raven2 and later */
4923 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4924 
4925 		/*
4926 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4927 		 * doesn't support _D on DCN
4928 		 */
4929 
4930 		if (has_constant_encode) {
4931 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4932 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4933 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4934 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4935 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4936 				    AMD_FMT_MOD_SET(DCC, 1) |
4937 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4938 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4939 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4940 		}
4941 
4942 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4943 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4944 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4945 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4946 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4947 			    AMD_FMT_MOD_SET(DCC, 1) |
4948 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4949 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4950 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4951 
4952 		if (has_constant_encode) {
4953 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4954 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4955 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4956 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4957 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4958 				    AMD_FMT_MOD_SET(DCC, 1) |
4959 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4960 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4964 				    AMD_FMT_MOD_SET(RB, rb) |
4965 				    AMD_FMT_MOD_SET(PIPE, pipes));
4966 		}
4967 
4968 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4969 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4970 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4971 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4972 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4973 			    AMD_FMT_MOD_SET(DCC, 1) |
4974 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4975 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4976 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4977 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4978 			    AMD_FMT_MOD_SET(RB, rb) |
4979 			    AMD_FMT_MOD_SET(PIPE, pipes));
4980 	}
4981 
4982 	/*
4983 	 * Only supported for 64bpp on Raven, will be filtered on format in
4984 	 * dm_plane_format_mod_supported.
4985 	 */
4986 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4987 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4988 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4989 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4990 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4991 
4992 	if (adev->family == AMDGPU_FAMILY_RV) {
4993 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4994 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4995 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4996 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4997 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4998 	}
4999 
5000 	/*
5001 	 * Only supported for 64bpp on Raven, will be filtered on format in
5002 	 * dm_plane_format_mod_supported.
5003 	 */
5004 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5005 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5006 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5007 
5008 	if (adev->family == AMDGPU_FAMILY_RV) {
5009 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5010 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5011 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5012 	}
5013 }
5014 
5015 static void
5016 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5017 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5018 {
5019 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5020 
5021 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5022 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5023 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5024 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5025 		    AMD_FMT_MOD_SET(DCC, 1) |
5026 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5027 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5028 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5029 
5030 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5031 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5032 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5033 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5034 		    AMD_FMT_MOD_SET(DCC, 1) |
5035 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5036 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5037 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5038 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5039 
5040 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5041 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5042 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5043 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5044 
5045 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5046 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5047 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5048 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5049 
5050 
5051 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5052 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5053 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5054 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5055 
5056 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5057 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5058 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5059 }
5060 
5061 static void
5062 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5063 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5064 {
5065 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5066 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5067 
5068 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5069 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5070 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5071 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5072 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5073 		    AMD_FMT_MOD_SET(DCC, 1) |
5074 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5075 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5076 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5077 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5078 
5079 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5080 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5081 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5082 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5083 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5084 		    AMD_FMT_MOD_SET(DCC, 1) |
5085 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5086 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5087 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5088 
5089 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5090 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5091 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5092 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5093 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5094 		    AMD_FMT_MOD_SET(DCC, 1) |
5095 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5096 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5097 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5098 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5099 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5100 
5101 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5102 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5103 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5104 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5105 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5106 		    AMD_FMT_MOD_SET(DCC, 1) |
5107 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5108 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5109 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5110 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5111 
5112 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5113 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5114 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5115 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5116 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5117 
5118 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5119 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5120 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5121 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5122 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5123 
5124 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5125 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5126 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5127 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5128 
5129 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5130 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5131 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5132 }
5133 
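/*
 * Build the format-modifier list advertised for a plane. Cursor planes only
 * get LINEAR; other plane types get the per-family GFX9/GFX10 lists above.
 * The array is terminated with DRM_FORMAT_MOD_INVALID and ownership of it
 * passes to the caller.
 */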
5134 static int
5135 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5136 {
5137 	uint64_t size = 0, capacity = 128;
5138 	*mods = NULL;
5139 
5140 	/* We have not hooked up any pre-GFX9 modifiers. */
5141 	if (adev->family < AMDGPU_FAMILY_AI)
5142 		return 0;
5143 
5144 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5145 
5146 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5147 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5148 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5149 		return *mods ? 0 : -ENOMEM;
5150 	}
5151 
5152 	switch (adev->family) {
5153 	case AMDGPU_FAMILY_AI:
5154 	case AMDGPU_FAMILY_RV:
5155 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5156 		break;
5157 	case AMDGPU_FAMILY_NV:
5158 	case AMDGPU_FAMILY_VGH:
5159 	case AMDGPU_FAMILY_YC:
5160 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5161 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5162 		else
5163 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5164 		break;
5165 	}
5166 
5167 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5168 
5169 	/* INVALID marks the end of the list. */
5170 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5171 
5172 	if (!*mods)
5173 		return -ENOMEM;
5174 
5175 	return 0;
5176 }
5177 
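/*
 * Derive DC tiling/DCC parameters from the framebuffer's format modifier on
 * GFX9+: swizzle mode, DCC meta surface address/pitch and the independent
 * block configuration, then validate the result against DC's DCC support.
 */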
5178 static int
5179 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5180 					  const struct amdgpu_framebuffer *afb,
5181 					  const enum surface_pixel_format format,
5182 					  const enum dc_rotation_angle rotation,
5183 					  const struct plane_size *plane_size,
5184 					  union dc_tiling_info *tiling_info,
5185 					  struct dc_plane_dcc_param *dcc,
5186 					  struct dc_plane_address *address,
5187 					  const bool force_disable_dcc)
5188 {
5189 	const uint64_t modifier = afb->base.modifier;
5190 	int ret = 0;
5191 
5192 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5193 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5194 
5195 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5196 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5197 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5198 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5199 
5200 		dcc->enable = 1;
5201 		dcc->meta_pitch = afb->base.pitches[1];
5202 		dcc->independent_64b_blks = independent_64b_blks;
5203 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5204 			if (independent_64b_blks && independent_128b_blks)
5205 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5206 			else if (independent_128b_blks)
5207 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5208 			else if (independent_64b_blks && !independent_128b_blks)
5209 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5210 			else
5211 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5212 		} else {
5213 			if (independent_64b_blks)
5214 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5215 			else
5216 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5217 		}
5218 
5219 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5220 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5221 	}
5222 
5223 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5224 	if (ret)
5225 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5226 
5227 	return ret;
5228 }
5229 
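/*
 * Fill DC surface addressing, plane size and tiling info for a framebuffer.
 * RGB formats use a single graphics address; video formats get separate
 * luma/chroma addresses. GFX9+ tiling comes from the format modifier, while
 * older ASICs fall back to the legacy tiling flags.
 */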
5230 static int
5231 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5232 			     const struct amdgpu_framebuffer *afb,
5233 			     const enum surface_pixel_format format,
5234 			     const enum dc_rotation_angle rotation,
5235 			     const uint64_t tiling_flags,
5236 			     union dc_tiling_info *tiling_info,
5237 			     struct plane_size *plane_size,
5238 			     struct dc_plane_dcc_param *dcc,
5239 			     struct dc_plane_address *address,
5240 			     bool tmz_surface,
5241 			     bool force_disable_dcc)
5242 {
5243 	const struct drm_framebuffer *fb = &afb->base;
5244 	int ret;
5245 
5246 	memset(tiling_info, 0, sizeof(*tiling_info));
5247 	memset(plane_size, 0, sizeof(*plane_size));
5248 	memset(dcc, 0, sizeof(*dcc));
5249 	memset(address, 0, sizeof(*address));
5250 
5251 	address->tmz_surface = tmz_surface;
5252 
5253 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5254 		uint64_t addr = afb->address + fb->offsets[0];
5255 
5256 		plane_size->surface_size.x = 0;
5257 		plane_size->surface_size.y = 0;
5258 		plane_size->surface_size.width = fb->width;
5259 		plane_size->surface_size.height = fb->height;
5260 		plane_size->surface_pitch =
5261 			fb->pitches[0] / fb->format->cpp[0];
5262 
5263 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5264 		address->grph.addr.low_part = lower_32_bits(addr);
5265 		address->grph.addr.high_part = upper_32_bits(addr);
5266 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5267 		uint64_t luma_addr = afb->address + fb->offsets[0];
5268 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5269 
5270 		plane_size->surface_size.x = 0;
5271 		plane_size->surface_size.y = 0;
5272 		plane_size->surface_size.width = fb->width;
5273 		plane_size->surface_size.height = fb->height;
5274 		plane_size->surface_pitch =
5275 			fb->pitches[0] / fb->format->cpp[0];
5276 
5277 		plane_size->chroma_size.x = 0;
5278 		plane_size->chroma_size.y = 0;
5279 		/* TODO: set these based on surface format */
5280 		plane_size->chroma_size.width = fb->width / 2;
5281 		plane_size->chroma_size.height = fb->height / 2;
5282 
5283 		plane_size->chroma_pitch =
5284 			fb->pitches[1] / fb->format->cpp[1];
5285 
5286 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5287 		address->video_progressive.luma_addr.low_part =
5288 			lower_32_bits(luma_addr);
5289 		address->video_progressive.luma_addr.high_part =
5290 			upper_32_bits(luma_addr);
5291 		address->video_progressive.chroma_addr.low_part =
5292 			lower_32_bits(chroma_addr);
5293 		address->video_progressive.chroma_addr.high_part =
5294 			upper_32_bits(chroma_addr);
5295 	}
5296 
5297 	if (adev->family >= AMDGPU_FAMILY_AI) {
5298 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5299 								rotation, plane_size,
5300 								tiling_info, dcc,
5301 								address,
5302 								force_disable_dcc);
5303 		if (ret)
5304 			return ret;
5305 	} else {
5306 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5307 	}
5308 
5309 	return 0;
5310 }
5311 
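/*
 * Translate DRM blending state into DC blending parameters. Per-pixel alpha
 * is only honoured for overlay planes using pre-multiplied alpha formats;
 * a plane alpha below 0xffff enables global alpha with an 8-bit value.
 */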
5312 static void
5313 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5314 			       bool *per_pixel_alpha, bool *global_alpha,
5315 			       int *global_alpha_value)
5316 {
5317 	*per_pixel_alpha = false;
5318 	*global_alpha = false;
5319 	*global_alpha_value = 0xff;
5320 
5321 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5322 		return;
5323 
5324 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5325 		static const uint32_t alpha_formats[] = {
5326 			DRM_FORMAT_ARGB8888,
5327 			DRM_FORMAT_RGBA8888,
5328 			DRM_FORMAT_ABGR8888,
5329 		};
5330 		uint32_t format = plane_state->fb->format->format;
5331 		unsigned int i;
5332 
5333 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5334 			if (format == alpha_formats[i]) {
5335 				*per_pixel_alpha = true;
5336 				break;
5337 			}
5338 		}
5339 	}
5340 
5341 	if (plane_state->alpha < 0xffff) {
5342 		*global_alpha = true;
5343 		*global_alpha_value = plane_state->alpha >> 8;
5344 	}
5345 }
5346 
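/*
 * Map the DRM color encoding/range properties of a YCbCr plane onto a DC
 * color space. RGB formats always use sRGB; BT.2020 is only accepted in
 * full range.
 */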
5347 static int
5348 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5349 			    const enum surface_pixel_format format,
5350 			    enum dc_color_space *color_space)
5351 {
5352 	bool full_range;
5353 
5354 	*color_space = COLOR_SPACE_SRGB;
5355 
5356 	/* DRM color properties only affect non-RGB formats. */
5357 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5358 		return 0;
5359 
5360 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5361 
5362 	switch (plane_state->color_encoding) {
5363 	case DRM_COLOR_YCBCR_BT601:
5364 		if (full_range)
5365 			*color_space = COLOR_SPACE_YCBCR601;
5366 		else
5367 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5368 		break;
5369 
5370 	case DRM_COLOR_YCBCR_BT709:
5371 		if (full_range)
5372 			*color_space = COLOR_SPACE_YCBCR709;
5373 		else
5374 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5375 		break;
5376 
5377 	case DRM_COLOR_YCBCR_BT2020:
5378 		if (full_range)
5379 			*color_space = COLOR_SPACE_2020_YCBCR;
5380 		else
5381 			return -EINVAL;
5382 		break;
5383 
5384 	default:
5385 		return -EINVAL;
5386 	}
5387 
5388 	return 0;
5389 }
5390 
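/*
 * Convert a DRM plane state into a DC plane_info and surface address:
 * pixel format, rotation, tiling/DCC (via the buffer attribute helpers)
 * and blending parameters.
 */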
5391 static int
5392 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5393 			    const struct drm_plane_state *plane_state,
5394 			    const uint64_t tiling_flags,
5395 			    struct dc_plane_info *plane_info,
5396 			    struct dc_plane_address *address,
5397 			    bool tmz_surface,
5398 			    bool force_disable_dcc)
5399 {
5400 	const struct drm_framebuffer *fb = plane_state->fb;
5401 	const struct amdgpu_framebuffer *afb =
5402 		to_amdgpu_framebuffer(plane_state->fb);
5403 	int ret;
5404 
5405 	memset(plane_info, 0, sizeof(*plane_info));
5406 
5407 	switch (fb->format->format) {
5408 	case DRM_FORMAT_C8:
5409 		plane_info->format =
5410 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5411 		break;
5412 	case DRM_FORMAT_RGB565:
5413 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5414 		break;
5415 	case DRM_FORMAT_XRGB8888:
5416 	case DRM_FORMAT_ARGB8888:
5417 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5418 		break;
5419 	case DRM_FORMAT_XRGB2101010:
5420 	case DRM_FORMAT_ARGB2101010:
5421 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5422 		break;
5423 	case DRM_FORMAT_XBGR2101010:
5424 	case DRM_FORMAT_ABGR2101010:
5425 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5426 		break;
5427 	case DRM_FORMAT_XBGR8888:
5428 	case DRM_FORMAT_ABGR8888:
5429 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5430 		break;
5431 	case DRM_FORMAT_NV21:
5432 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5433 		break;
5434 	case DRM_FORMAT_NV12:
5435 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5436 		break;
5437 	case DRM_FORMAT_P010:
5438 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5439 		break;
5440 	case DRM_FORMAT_XRGB16161616F:
5441 	case DRM_FORMAT_ARGB16161616F:
5442 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5443 		break;
5444 	case DRM_FORMAT_XBGR16161616F:
5445 	case DRM_FORMAT_ABGR16161616F:
5446 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5447 		break;
5448 	case DRM_FORMAT_XRGB16161616:
5449 	case DRM_FORMAT_ARGB16161616:
5450 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5451 		break;
5452 	case DRM_FORMAT_XBGR16161616:
5453 	case DRM_FORMAT_ABGR16161616:
5454 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5455 		break;
5456 	default:
5457 		DRM_ERROR(
5458 			"Unsupported screen format %p4cc\n",
5459 			&fb->format->format);
5460 		return -EINVAL;
5461 	}
5462 
5463 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5464 	case DRM_MODE_ROTATE_0:
5465 		plane_info->rotation = ROTATION_ANGLE_0;
5466 		break;
5467 	case DRM_MODE_ROTATE_90:
5468 		plane_info->rotation = ROTATION_ANGLE_90;
5469 		break;
5470 	case DRM_MODE_ROTATE_180:
5471 		plane_info->rotation = ROTATION_ANGLE_180;
5472 		break;
5473 	case DRM_MODE_ROTATE_270:
5474 		plane_info->rotation = ROTATION_ANGLE_270;
5475 		break;
5476 	default:
5477 		plane_info->rotation = ROTATION_ANGLE_0;
5478 		break;
5479 	}
5480 
5481 	plane_info->visible = true;
5482 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5483 
5484 	plane_info->layer_index = 0;
5485 
5486 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5487 					  &plane_info->color_space);
5488 	if (ret)
5489 		return ret;
5490 
5491 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5492 					   plane_info->rotation, tiling_flags,
5493 					   &plane_info->tiling_info,
5494 					   &plane_info->plane_size,
5495 					   &plane_info->dcc, address, tmz_surface,
5496 					   force_disable_dcc);
5497 	if (ret)
5498 		return ret;
5499 
5500 	fill_blending_from_plane_state(
5501 		plane_state, &plane_info->per_pixel_alpha,
5502 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5503 
5504 	return 0;
5505 }
5506 
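/*
 * Populate a dc_plane_state from the DRM plane/CRTC state: scaling rects,
 * buffer address and format attributes, plus the per-plane input transfer
 * function derived from the owning CRTC's color management state.
 */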
5507 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5508 				    struct dc_plane_state *dc_plane_state,
5509 				    struct drm_plane_state *plane_state,
5510 				    struct drm_crtc_state *crtc_state)
5511 {
5512 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5513 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5514 	struct dc_scaling_info scaling_info;
5515 	struct dc_plane_info plane_info;
5516 	int ret;
5517 	bool force_disable_dcc = false;
5518 
5519 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5520 	if (ret)
5521 		return ret;
5522 
5523 	dc_plane_state->src_rect = scaling_info.src_rect;
5524 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5525 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5526 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5527 
5528 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5529 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5530 					  afb->tiling_flags,
5531 					  &plane_info,
5532 					  &dc_plane_state->address,
5533 					  afb->tmz_surface,
5534 					  force_disable_dcc);
5535 	if (ret)
5536 		return ret;
5537 
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
5542 	dc_plane_state->rotation = plane_info.rotation;
5543 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5544 	dc_plane_state->stereo_format = plane_info.stereo_format;
5545 	dc_plane_state->tiling_info = plane_info.tiling_info;
5546 	dc_plane_state->visible = plane_info.visible;
5547 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5548 	dc_plane_state->global_alpha = plane_info.global_alpha;
5549 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5550 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5552 	dc_plane_state->flip_int_enabled = true;
5553 
5554 	/*
5555 	 * Always set input transfer function, since plane state is refreshed
5556 	 * every time.
5557 	 */
5558 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5559 	if (ret)
5560 		return ret;
5561 
5562 	return 0;
5563 }
5564 
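/*
 * Compute the stream source (viewport) and destination rectangles from the
 * mode and the connector's scaling/underscan state, centering the result in
 * the addressable area and shrinking it by any underscan borders.
 */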
5565 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5566 					   const struct dm_connector_state *dm_state,
5567 					   struct dc_stream_state *stream)
5568 {
5569 	enum amdgpu_rmx_type rmx_type;
5570 
	struct rect src = { 0 }; /* viewport in composition space */
5572 	struct rect dst = { 0 }; /* stream addressable area */
5573 
5574 	/* no mode. nothing to be done */
5575 	if (!mode)
5576 		return;
5577 
5578 	/* Full screen scaling by default */
5579 	src.width = mode->hdisplay;
5580 	src.height = mode->vdisplay;
5581 	dst.width = stream->timing.h_addressable;
5582 	dst.height = stream->timing.v_addressable;
5583 
5584 	if (dm_state) {
5585 		rmx_type = dm_state->scaling;
5586 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5587 			if (src.width * dst.height <
5588 					src.height * dst.width) {
5589 				/* height needs less upscaling/more downscaling */
5590 				dst.width = src.width *
5591 						dst.height / src.height;
5592 			} else {
5593 				/* width needs less upscaling/more downscaling */
5594 				dst.height = src.height *
5595 						dst.width / src.width;
5596 			}
5597 		} else if (rmx_type == RMX_CENTER) {
5598 			dst = src;
5599 		}
5600 
5601 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5602 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5603 
5604 		if (dm_state->underscan_enable) {
5605 			dst.x += dm_state->underscan_hborder / 2;
5606 			dst.y += dm_state->underscan_vborder / 2;
5607 			dst.width -= dm_state->underscan_hborder;
5608 			dst.height -= dm_state->underscan_vborder;
5609 		}
5610 	}
5611 
5612 	stream->src = src;
5613 	stream->dst = dst;
5614 
5615 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);
}
5619 
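/*
 * Pick the stream color depth from the sink's EDID capabilities (deep-color
 * support for YCbCr 4:2:0, display_info.bpc otherwise), capped by the
 * connector's requested max bpc and rounded down to an even value.
 */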
5620 static enum dc_color_depth
5621 convert_color_depth_from_display_info(const struct drm_connector *connector,
5622 				      bool is_y420, int requested_bpc)
5623 {
5624 	uint8_t bpc;
5625 
5626 	if (is_y420) {
5627 		bpc = 8;
5628 
5629 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5630 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5631 			bpc = 16;
5632 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5633 			bpc = 12;
5634 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5635 			bpc = 10;
5636 	} else {
5637 		bpc = (uint8_t)connector->display_info.bpc;
5638 		/* Assume 8 bpc by default if no bpc is specified. */
5639 		bpc = bpc ? bpc : 8;
5640 	}
5641 
5642 	if (requested_bpc > 0) {
5643 		/*
5644 		 * Cap display bpc based on the user requested value.
5645 		 *
		 * The value for state->max_bpc may not be correctly updated
5647 		 * depending on when the connector gets added to the state
5648 		 * or if this was called outside of atomic check, so it
5649 		 * can't be used directly.
5650 		 */
5651 		bpc = min_t(u8, bpc, requested_bpc);
5652 
5653 		/* Round down to the nearest even number. */
5654 		bpc = bpc - (bpc & 1);
5655 	}
5656 
5657 	switch (bpc) {
5658 	case 0:
5659 		/*
5660 		 * Temporary Work around, DRM doesn't parse color depth for
5661 		 * EDID revision before 1.4
5662 		 * TODO: Fix edid parsing
5663 		 */
5664 		return COLOR_DEPTH_888;
5665 	case 6:
5666 		return COLOR_DEPTH_666;
5667 	case 8:
5668 		return COLOR_DEPTH_888;
5669 	case 10:
5670 		return COLOR_DEPTH_101010;
5671 	case 12:
5672 		return COLOR_DEPTH_121212;
5673 	case 14:
5674 		return COLOR_DEPTH_141414;
5675 	case 16:
5676 		return COLOR_DEPTH_161616;
5677 	default:
5678 		return COLOR_DEPTH_UNDEFINED;
5679 	}
5680 }
5681 
5682 static enum dc_aspect_ratio
5683 get_aspect_ratio(const struct drm_display_mode *mode_in)
5684 {
5685 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5686 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5687 }
5688 
5689 static enum dc_color_space
5690 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5691 {
5692 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5693 
5694 	switch (dc_crtc_timing->pixel_encoding)	{
5695 	case PIXEL_ENCODING_YCBCR422:
5696 	case PIXEL_ENCODING_YCBCR444:
5697 	case PIXEL_ENCODING_YCBCR420:
5698 	{
5699 		/*
5700 		 * 27030khz is the separation point between HDTV and SDTV
5701 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5702 		 * respectively
5703 		 */
5704 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5705 			if (dc_crtc_timing->flags.Y_ONLY)
5706 				color_space =
5707 					COLOR_SPACE_YCBCR709_LIMITED;
5708 			else
5709 				color_space = COLOR_SPACE_YCBCR709;
5710 		} else {
5711 			if (dc_crtc_timing->flags.Y_ONLY)
5712 				color_space =
5713 					COLOR_SPACE_YCBCR601_LIMITED;
5714 			else
5715 				color_space = COLOR_SPACE_YCBCR601;
5716 		}
5717 
5718 	}
5719 	break;
5720 	case PIXEL_ENCODING_RGB:
5721 		color_space = COLOR_SPACE_SRGB;
5722 		break;
5723 
5724 	default:
5725 		WARN_ON(1);
5726 		break;
5727 	}
5728 
5729 	return color_space;
5730 }
5731 
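/*
 * Walk down from the requested color depth until the normalized pixel clock
 * (scaled per the HDMI deep-color ratios, halved for YCbCr 4:2:0) fits
 * within the sink's max TMDS clock; returns false if no suitable depth is
 * found or the depth is not valid for HDMI.
 */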
5732 static bool adjust_colour_depth_from_display_info(
5733 	struct dc_crtc_timing *timing_out,
5734 	const struct drm_display_info *info)
5735 {
5736 	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

5738 	do {
5739 		normalized_clk = timing_out->pix_clk_100hz / 10;
5740 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5741 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5742 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec based on colour depth */
5744 		switch (depth) {
5745 		case COLOR_DEPTH_888:
5746 			break;
5747 		case COLOR_DEPTH_101010:
5748 			normalized_clk = (normalized_clk * 30) / 24;
5749 			break;
5750 		case COLOR_DEPTH_121212:
5751 			normalized_clk = (normalized_clk * 36) / 24;
5752 			break;
5753 		case COLOR_DEPTH_161616:
5754 			normalized_clk = (normalized_clk * 48) / 24;
5755 			break;
5756 		default:
5757 			/* The above depths are the only ones valid for HDMI. */
5758 			return false;
5759 		}
5760 		if (normalized_clk <= info->max_tmds_clock) {
5761 			timing_out->display_color_depth = depth;
5762 			return true;
5763 		}
5764 	} while (--depth > COLOR_DEPTH_666);
5765 	return false;
5766 }
5767 
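/*
 * Translate a drm_display_mode and connector state into DC stream timing:
 * pixel encoding selection (forcing YCbCr 4:2:0 where required), color
 * depth, CEA/HDMI VICs, sync polarities and the addressable/total timing,
 * preserving the VIC and polarities of an old stream when one is supplied.
 */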
5768 static void fill_stream_properties_from_drm_display_mode(
5769 	struct dc_stream_state *stream,
5770 	const struct drm_display_mode *mode_in,
5771 	const struct drm_connector *connector,
5772 	const struct drm_connector_state *connector_state,
5773 	const struct dc_stream_state *old_stream,
5774 	int requested_bpc)
5775 {
5776 	struct dc_crtc_timing *timing_out = &stream->timing;
5777 	const struct drm_display_info *info = &connector->display_info;
5778 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5779 	struct hdmi_vendor_infoframe hv_frame;
5780 	struct hdmi_avi_infoframe avi_frame;
5781 
5782 	memset(&hv_frame, 0, sizeof(hv_frame));
5783 	memset(&avi_frame, 0, sizeof(avi_frame));
5784 
5785 	timing_out->h_border_left = 0;
5786 	timing_out->h_border_right = 0;
5787 	timing_out->v_border_top = 0;
5788 	timing_out->v_border_bottom = 0;
5789 	/* TODO: un-hardcode */
5790 	if (drm_mode_is_420_only(info, mode_in)
5791 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5792 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5793 	else if (drm_mode_is_420_also(info, mode_in)
5794 			&& aconnector->force_yuv420_output)
5795 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5796 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5797 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5798 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5799 	else
5800 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5801 
5802 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5803 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5804 		connector,
5805 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5806 		requested_bpc);
5807 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5808 	timing_out->hdmi_vic = 0;
5809 
	if (old_stream) {
5811 		timing_out->vic = old_stream->timing.vic;
5812 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5813 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5814 	} else {
5815 		timing_out->vic = drm_match_cea_mode(mode_in);
5816 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5817 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5818 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5819 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5820 	}
5821 
5822 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5823 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5824 		timing_out->vic = avi_frame.video_code;
5825 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5826 		timing_out->hdmi_vic = hv_frame.vic;
5827 	}
5828 
5829 	if (is_freesync_video_mode(mode_in, aconnector)) {
5830 		timing_out->h_addressable = mode_in->hdisplay;
5831 		timing_out->h_total = mode_in->htotal;
5832 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5833 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5834 		timing_out->v_total = mode_in->vtotal;
5835 		timing_out->v_addressable = mode_in->vdisplay;
5836 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5837 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5838 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5839 	} else {
5840 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5841 		timing_out->h_total = mode_in->crtc_htotal;
5842 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5843 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5844 		timing_out->v_total = mode_in->crtc_vtotal;
5845 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5846 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5847 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5848 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5849 	}
5850 
5851 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5852 
5853 	stream->output_color_space = get_output_color_space(timing_out);
5854 
5855 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5856 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5857 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5858 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5859 		    drm_mode_is_420_also(info, mode_in) &&
5860 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5861 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5862 			adjust_colour_depth_from_display_info(timing_out, info);
5863 		}
5864 	}
5865 }
5866 
5867 static void fill_audio_info(struct audio_info *audio_info,
5868 			    const struct drm_connector *drm_connector,
5869 			    const struct dc_sink *dc_sink)
5870 {
5871 	int i = 0;
5872 	int cea_revision = 0;
5873 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5874 
5875 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5876 	audio_info->product_id = edid_caps->product_id;
5877 
5878 	cea_revision = drm_connector->display_info.cea_rev;
5879 
5880 	strscpy(audio_info->display_name,
5881 		edid_caps->display_name,
5882 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5883 
5884 	if (cea_revision >= 3) {
5885 		audio_info->mode_count = edid_caps->audio_mode_count;
5886 
5887 		for (i = 0; i < audio_info->mode_count; ++i) {
5888 			audio_info->modes[i].format_code =
5889 					(enum audio_format_code)
5890 					(edid_caps->audio_modes[i].format_code);
5891 			audio_info->modes[i].channel_count =
5892 					edid_caps->audio_modes[i].channel_count;
5893 			audio_info->modes[i].sample_rates.all =
5894 					edid_caps->audio_modes[i].sample_rate;
5895 			audio_info->modes[i].sample_size =
5896 					edid_caps->audio_modes[i].sample_size;
5897 		}
5898 	}
5899 
5900 	audio_info->flags.all = edid_caps->speaker_flags;
5901 
5902 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5903 	if (drm_connector->latency_present[0]) {
5904 		audio_info->video_latency = drm_connector->video_latency[0];
5905 		audio_info->audio_latency = drm_connector->audio_latency[0];
5906 	}
5907 
5908 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5909 
5910 }
5911 
5912 static void
5913 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5914 				      struct drm_display_mode *dst_mode)
5915 {
5916 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5917 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5918 	dst_mode->crtc_clock = src_mode->crtc_clock;
5919 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5920 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5921 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5922 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5923 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5924 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5925 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5926 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5927 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5928 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5929 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5930 }
5931 
5932 static void
5933 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5934 					const struct drm_display_mode *native_mode,
5935 					bool scale_enabled)
5936 {
5937 	if (scale_enabled) {
5938 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5939 	} else if (native_mode->clock == drm_mode->clock &&
5940 			native_mode->htotal == drm_mode->htotal &&
5941 			native_mode->vtotal == drm_mode->vtotal) {
5942 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5943 	} else {
		/* no scaling and no amdgpu-inserted mode, nothing to patch */
5945 	}
5946 }
5947 
5948 static struct dc_sink *
5949 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5950 {
5951 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

5953 	sink_init_data.link = aconnector->dc_link;
5954 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5955 
5956 	sink = dc_sink_create(&sink_init_data);
5957 	if (!sink) {
5958 		DRM_ERROR("Failed to create sink!\n");
5959 		return NULL;
5960 	}
5961 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5962 
5963 	return sink;
5964 }
5965 
5966 static void set_multisync_trigger_params(
5967 		struct dc_stream_state *stream)
5968 {
5969 	struct dc_stream_state *master = NULL;
5970 
5971 	if (stream->triggered_crtc_reset.enabled) {
5972 		master = stream->triggered_crtc_reset.event_source;
5973 		stream->triggered_crtc_reset.event =
5974 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5975 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5976 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5977 	}
5978 }
5979 
5980 static void set_master_stream(struct dc_stream_state *stream_set[],
5981 			      int stream_count)
5982 {
5983 	int j, highest_rfr = 0, master_stream = 0;
5984 
5985 	for (j = 0;  j < stream_count; j++) {
5986 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5987 			int refresh_rate = 0;
5988 
5989 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5990 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5991 			if (refresh_rate > highest_rfr) {
5992 				highest_rfr = refresh_rate;
5993 				master_stream = j;
5994 			}
5995 		}
5996 	}
5997 	for (j = 0;  j < stream_count; j++) {
5998 		if (stream_set[j])
5999 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6000 	}
6001 }
6002 
6003 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6004 {
6005 	int i = 0;
6006 	struct dc_stream_state *stream;
6007 
6008 	if (context->stream_count < 2)
6009 		return;
6010 	for (i = 0; i < context->stream_count ; i++) {
6011 		if (!context->streams[i])
6012 			continue;
6013 		/*
6014 		 * TODO: add a function to read AMD VSDB bits and set
6015 		 * crtc_sync_master.multi_sync_enabled flag
6016 		 * For now it's set to false
6017 		 */
6018 	}
6019 
6020 	set_master_stream(context->streams, context->stream_count);
6021 
6022 	for (i = 0; i < context->stream_count ; i++) {
6023 		stream = context->streams[i];
6024 
6025 		if (!stream)
6026 			continue;
6027 
6028 		set_multisync_trigger_params(stream);
6029 	}
6030 }
6031 
6032 #if defined(CONFIG_DRM_AMD_DC_DCN)
6033 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6034 							struct dc_sink *sink, struct dc_stream_state *stream,
6035 							struct dsc_dec_dpcd_caps *dsc_caps)
6036 {
6037 	stream->timing.flags.DSC = 0;
6038 
6039 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6040 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6041 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6042 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6043 				      dsc_caps);
6044 	}
6045 }
6046 
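/*
 * Decide whether DSC is enabled for an SST stream and with what config:
 * compute a DSC config that fits the link bandwidth from the decoder's DPCD
 * caps, honouring any debugfs force-enable and slice/bpp overrides.
 */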
6047 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6048 										struct dc_sink *sink, struct dc_stream_state *stream,
6049 										struct dsc_dec_dpcd_caps *dsc_caps)
6050 {
6051 	struct drm_connector *drm_connector = &aconnector->base;
6052 	uint32_t link_bandwidth_kbps;
6053 	uint32_t max_dsc_target_bpp_limit_override = 0;
6054 
6055 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6056 							dc_link_get_link_cap(aconnector->dc_link));
6057 
6058 	if (stream->link && stream->link->local_sink)
6059 		max_dsc_target_bpp_limit_override =
6060 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6061 
6062 	/* Set DSC policy according to dsc_clock_en */
6063 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6064 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6065 
6066 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6067 
6068 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6069 						dsc_caps,
6070 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6071 						max_dsc_target_bpp_limit_override,
6072 						link_bandwidth_kbps,
6073 						&stream->timing,
6074 						&stream->timing.dsc_cfg)) {
6075 			stream->timing.flags.DSC = 1;
6076 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6077 		}
6078 	}
6079 
6080 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6081 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6082 		stream->timing.flags.DSC = 1;
6083 
6084 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6085 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6086 
6087 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6088 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6089 
6090 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6091 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6092 }
6093 #endif /* CONFIG_DRM_AMD_DC_DCN */
6094 
6095 /**
6096  * DOC: FreeSync Video
6097  *
6098  * When a userspace application wants to play a video, the content follows a
6099  * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS,
 * respectively:
6102  *
6103  * - TV/NTSC (23.976 FPS)
6104  * - Cinema (24 FPS)
6105  * - TV/PAL (25 FPS)
6106  * - TV/NTSC (29.97 FPS)
6107  * - TV/NTSC (30 FPS)
6108  * - Cinema HFR (48 FPS)
6109  * - TV/PAL (50 FPS)
6110  * - Commonly used (60 FPS)
6111  * - Multiples of 24 (48,72,96,120 FPS)
6112  *
 * The list of standard video formats is not huge and can be added to the
 * connector modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch will happen seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that differ only in the
 * refresh rate, DC will skip the full update and avoid any blink during the
 * transition. For example, the video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen, without
 * causing any display blink. The same concept can be applied to a mode
 * setting change.
6124  */
6125 static struct drm_display_mode *
6126 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6127 			  bool use_probed_modes)
6128 {
6129 	struct drm_display_mode *m, *m_pref = NULL;
6130 	u16 current_refresh, highest_refresh;
6131 	struct list_head *list_head = use_probed_modes ?
6132 						    &aconnector->base.probed_modes :
6133 						    &aconnector->base.modes;
6134 
6135 	if (aconnector->freesync_vid_base.clock != 0)
6136 		return &aconnector->freesync_vid_base;
6137 
6138 	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
6140 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6141 			m_pref = m;
6142 			break;
6143 		}
6144 	}
6145 
6146 	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6148 		m_pref = list_first_entry_or_null(
6149 			&aconnector->base.modes, struct drm_display_mode, head);
6150 		if (!m_pref) {
6151 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6152 			return NULL;
6153 		}
6154 	}
6155 
6156 	highest_refresh = drm_mode_vrefresh(m_pref);
6157 
6158 	/*
	 * Find the mode with the highest refresh rate at the same resolution.
	 * For some monitors, the preferred mode is not the mode with the
	 * highest supported refresh rate.
6162 	 */
	list_for_each_entry(m, list_head, head) {
6164 		current_refresh  = drm_mode_vrefresh(m);
6165 
6166 		if (m->hdisplay == m_pref->hdisplay &&
6167 		    m->vdisplay == m_pref->vdisplay &&
6168 		    highest_refresh < current_refresh) {
6169 			highest_refresh = current_refresh;
6170 			m_pref = m;
6171 		}
6172 	}
6173 
6174 	aconnector->freesync_vid_base = *m_pref;
6175 	return m_pref;
6176 }
6177 
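/*
 * A mode counts as a "FreeSync video" mode if it matches the connector's
 * highest-refresh base mode in every respect except the vertical blanking
 * period, i.e. it was derived from the base mode by stretching the front
 * porch.
 */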
6178 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6179 				   struct amdgpu_dm_connector *aconnector)
6180 {
6181 	struct drm_display_mode *high_mode;
6182 	int timing_diff;
6183 
6184 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6185 	if (!high_mode || !mode)
6186 		return false;
6187 
6188 	timing_diff = high_mode->vtotal - mode->vtotal;
6189 
6190 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6191 	    high_mode->hdisplay != mode->hdisplay ||
6192 	    high_mode->vdisplay != mode->vdisplay ||
6193 	    high_mode->hsync_start != mode->hsync_start ||
6194 	    high_mode->hsync_end != mode->hsync_end ||
6195 	    high_mode->htotal != mode->htotal ||
6196 	    high_mode->hskew != mode->hskew ||
6197 	    high_mode->vscan != mode->vscan ||
6198 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6199 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6200 		return false;
6201 	else
6202 		return true;
6203 }
6204 
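/*
 * Build a dc_stream_state for a connector and mode: creates a fake sink if
 * no real one is attached, fills timing and scaling from the mode (reusing
 * the FreeSync base timing when applicable), applies the SST DSC policy,
 * and sets up audio, HDMI/VSC info packets and PSR-related state.
 */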
6205 static struct dc_stream_state *
6206 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6207 		       const struct drm_display_mode *drm_mode,
6208 		       const struct dm_connector_state *dm_state,
6209 		       const struct dc_stream_state *old_stream,
6210 		       int requested_bpc)
6211 {
6212 	struct drm_display_mode *preferred_mode = NULL;
6213 	struct drm_connector *drm_connector;
6214 	const struct drm_connector_state *con_state =
6215 		dm_state ? &dm_state->base : NULL;
6216 	struct dc_stream_state *stream = NULL;
6217 	struct drm_display_mode mode = *drm_mode;
6218 	struct drm_display_mode saved_mode;
6219 	struct drm_display_mode *freesync_mode = NULL;
6220 	bool native_mode_found = false;
6221 	bool recalculate_timing = false;
6222 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6223 	int mode_refresh;
6224 	int preferred_refresh = 0;
6225 #if defined(CONFIG_DRM_AMD_DC_DCN)
6226 	struct dsc_dec_dpcd_caps dsc_caps;
6227 #endif
6228 	struct dc_sink *sink = NULL;
6229 
6230 	memset(&saved_mode, 0, sizeof(saved_mode));
6231 
6232 	if (aconnector == NULL) {
6233 		DRM_ERROR("aconnector is NULL!\n");
6234 		return stream;
6235 	}
6236 
6237 	drm_connector = &aconnector->base;
6238 
6239 	if (!aconnector->dc_sink) {
6240 		sink = create_fake_sink(aconnector);
6241 		if (!sink)
6242 			return stream;
6243 	} else {
6244 		sink = aconnector->dc_sink;
6245 		dc_sink_retain(sink);
6246 	}
6247 
6248 	stream = dc_create_stream_for_sink(sink);
6249 
6250 	if (stream == NULL) {
6251 		DRM_ERROR("Failed to create stream for sink!\n");
6252 		goto finish;
6253 	}
6254 
6255 	stream->dm_stream_context = aconnector;
6256 
6257 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6258 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6259 
6260 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6261 		/* Search for preferred mode */
6262 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6263 			native_mode_found = true;
6264 			break;
6265 		}
6266 	}
6267 	if (!native_mode_found)
6268 		preferred_mode = list_first_entry_or_null(
6269 				&aconnector->base.modes,
6270 				struct drm_display_mode,
6271 				head);
6272 
6273 	mode_refresh = drm_mode_vrefresh(&mode);
6274 
6275 	if (preferred_mode == NULL) {
6276 		/*
6277 		 * This may not be an error, the use case is when we have no
6278 		 * usermode calls to reset and set mode upon hotplug. In this
6279 		 * case, we call set mode ourselves to restore the previous mode
6280 		 * and the modelist may not be filled in in time.
6281 		 */
6282 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6283 	} else {
6284 		recalculate_timing = amdgpu_freesync_vid_mode &&
6285 				 is_freesync_video_mode(&mode, aconnector);
6286 		if (recalculate_timing) {
6287 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6288 			saved_mode = mode;
6289 			mode = *freesync_mode;
6290 		} else {
6291 			decide_crtc_timing_for_drm_display_mode(
6292 				&mode, preferred_mode, scale);
6293 
6294 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6295 		}
6296 	}
6297 
6298 	if (recalculate_timing)
6299 		drm_mode_set_crtcinfo(&saved_mode, 0);
6300 	else if (!dm_state)
6301 		drm_mode_set_crtcinfo(&mode, 0);
6302 
6303        /*
6304 	* If scaling is enabled and refresh rate didn't change
6305 	* we copy the vic and polarities of the old timings
6306 	*/
6307 	if (!scale || mode_refresh != preferred_refresh)
6308 		fill_stream_properties_from_drm_display_mode(
6309 			stream, &mode, &aconnector->base, con_state, NULL,
6310 			requested_bpc);
6311 	else
6312 		fill_stream_properties_from_drm_display_mode(
6313 			stream, &mode, &aconnector->base, con_state, old_stream,
6314 			requested_bpc);
6315 
6316 #if defined(CONFIG_DRM_AMD_DC_DCN)
6317 	/* SST DSC determination policy */
6318 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6319 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6320 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6321 #endif
6322 
6323 	update_stream_scaling_settings(&mode, dm_state, stream);
6324 
6325 	fill_audio_info(
6326 		&stream->audio_info,
6327 		drm_connector,
6328 		sink);
6329 
6330 	update_stream_signal(stream, sink);
6331 
6332 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6333 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6334 
6335 	if (stream->link->psr_settings.psr_feature_enabled) {
6336 		//
6337 		// should decide stream support vsc sdp colorimetry capability
6338 		// before building vsc info packet
6339 		//
6340 		stream->use_vsc_sdp_for_colorimetry = false;
6341 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6342 			stream->use_vsc_sdp_for_colorimetry =
6343 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6344 		} else {
6345 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6346 				stream->use_vsc_sdp_for_colorimetry = true;
6347 		}
6348 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
	}
6352 finish:
6353 	dc_sink_release(sink);
6354 
6355 	return stream;
6356 }
6357 
6358 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6359 {
6360 	drm_crtc_cleanup(crtc);
6361 	kfree(crtc);
6362 }
6363 
6364 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6365 				  struct drm_crtc_state *state)
6366 {
6367 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6368 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6370 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
6378 }
6379 
6380 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6381 {
6382 	struct dm_crtc_state *state;
6383 
6384 	if (crtc->state)
6385 		dm_crtc_destroy_state(crtc, crtc->state);
6386 
6387 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6388 	if (WARN_ON(!state))
6389 		return;
6390 
6391 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6392 }
6393 
6394 static struct drm_crtc_state *
6395 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6396 {
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
6403 
6404 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6405 	if (!state)
6406 		return NULL;
6407 
6408 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6409 
6410 	if (cur->stream) {
6411 		state->stream = cur->stream;
6412 		dc_stream_retain(state->stream);
6413 	}
6414 
6415 	state->active_planes = cur->active_planes;
6416 	state->vrr_infopacket = cur->vrr_infopacket;
6417 	state->abm_level = cur->abm_level;
6418 	state->vrr_supported = cur->vrr_supported;
6419 	state->freesync_config = cur->freesync_config;
6420 	state->cm_has_degamma = cur->cm_has_degamma;
6421 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6422 	state->force_dpms_off = cur->force_dpms_off;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
6424 
6425 	return &state->base;
6426 }
6427 
6428 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6429 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6430 {
6431 	crtc_debugfs_init(crtc);
6432 
6433 	return 0;
6434 }
6435 #endif
6436 
6437 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6438 {
6439 	enum dc_irq_source irq_source;
6440 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6441 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6442 	int rc;
6443 
6444 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6445 
6446 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6447 
6448 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6449 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6450 	return rc;
6451 }
6452 
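/*
 * Enable or disable the vblank interrupt for a CRTC. In VRR mode the vupdate
 * interrupt is toggled along with vblank; on DCN a vblank control work item
 * is also queued so the display core can react while vblank is in use.
 */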
6453 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6454 {
6455 	enum dc_irq_source irq_source;
6456 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6457 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6458 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6459 #if defined(CONFIG_DRM_AMD_DC_DCN)
6460 	struct amdgpu_display_manager *dm = &adev->dm;
6461 	struct vblank_control_work *work;
6462 #endif
6463 	int rc = 0;
6464 
6465 	if (enable) {
6466 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6467 		if (amdgpu_dm_vrr_active(acrtc_state))
6468 			rc = dm_set_vupdate_irq(crtc, true);
6469 	} else {
6470 		/* vblank irq off -> vupdate irq off */
6471 		rc = dm_set_vupdate_irq(crtc, false);
6472 	}
6473 
6474 	if (rc)
6475 		return rc;
6476 
6477 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6478 
6479 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6480 		return -EBUSY;
6481 
6482 	if (amdgpu_in_reset(adev))
6483 		return 0;
6484 
6485 #if defined(CONFIG_DRM_AMD_DC_DCN)
6486 	if (dm->vblank_control_workqueue) {
6487 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6488 		if (!work)
6489 			return -ENOMEM;
6490 
6491 		INIT_WORK(&work->work, vblank_control_worker);
6492 		work->dm = dm;
6493 		work->acrtc = acrtc;
6494 		work->enable = enable;
6495 
6496 		if (acrtc_state->stream) {
6497 			dc_stream_retain(acrtc_state->stream);
6498 			work->stream = acrtc_state->stream;
6499 		}
6500 
6501 		queue_work(dm->vblank_control_workqueue, &work->work);
6502 	}
6503 #endif
6504 
6505 	return 0;
6506 }
6507 
6508 static int dm_enable_vblank(struct drm_crtc *crtc)
6509 {
6510 	return dm_set_vblank(crtc, true);
6511 }
6512 
6513 static void dm_disable_vblank(struct drm_crtc *crtc)
6514 {
6515 	dm_set_vblank(crtc, false);
6516 }
6517 
/* Only the options currently available in the driver are implemented */
6519 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6520 	.reset = dm_crtc_reset_state,
6521 	.destroy = amdgpu_dm_crtc_destroy,
6522 	.set_config = drm_atomic_helper_set_config,
6523 	.page_flip = drm_atomic_helper_page_flip,
6524 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6525 	.atomic_destroy_state = dm_crtc_destroy_state,
6526 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6527 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6528 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6529 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6530 	.enable_vblank = dm_enable_vblank,
6531 	.disable_vblank = dm_disable_vblank,
6532 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6533 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6534 	.late_register = amdgpu_dm_crtc_late_register,
6535 #endif
6536 };
6537 
6538 static enum drm_connector_status
6539 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6540 {
6541 	bool connected;
6542 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6543 
6544 	/*
6545 	 * Notes:
6546 	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in the context of a user-mode ioctl,
	 * which makes it a bad place for *any* MST-related activity.
6549 	 */
6550 
6551 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6552 	    !aconnector->fake_enable)
6553 		connected = (aconnector->dc_sink != NULL);
6554 	else
6555 		connected = (aconnector->base.force == DRM_FORCE_ON);
6556 
6557 	update_subconnector_property(aconnector);
6558 
6559 	return (connected ? connector_status_connected :
6560 			connector_status_disconnected);
6561 }
6562 
6563 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6564 					    struct drm_connector_state *connector_state,
6565 					    struct drm_property *property,
6566 					    uint64_t val)
6567 {
6568 	struct drm_device *dev = connector->dev;
6569 	struct amdgpu_device *adev = drm_to_adev(dev);
6570 	struct dm_connector_state *dm_old_state =
6571 		to_dm_connector_state(connector->state);
6572 	struct dm_connector_state *dm_new_state =
6573 		to_dm_connector_state(connector_state);
6574 
6575 	int ret = -EINVAL;
6576 
6577 	if (property == dev->mode_config.scaling_mode_property) {
6578 		enum amdgpu_rmx_type rmx_type;
6579 
6580 		switch (val) {
6581 		case DRM_MODE_SCALE_CENTER:
6582 			rmx_type = RMX_CENTER;
6583 			break;
6584 		case DRM_MODE_SCALE_ASPECT:
6585 			rmx_type = RMX_ASPECT;
6586 			break;
6587 		case DRM_MODE_SCALE_FULLSCREEN:
6588 			rmx_type = RMX_FULL;
6589 			break;
6590 		case DRM_MODE_SCALE_NONE:
6591 		default:
6592 			rmx_type = RMX_OFF;
6593 			break;
6594 		}
6595 
6596 		if (dm_old_state->scaling == rmx_type)
6597 			return 0;
6598 
6599 		dm_new_state->scaling = rmx_type;
6600 		ret = 0;
6601 	} else if (property == adev->mode_info.underscan_hborder_property) {
6602 		dm_new_state->underscan_hborder = val;
6603 		ret = 0;
6604 	} else if (property == adev->mode_info.underscan_vborder_property) {
6605 		dm_new_state->underscan_vborder = val;
6606 		ret = 0;
6607 	} else if (property == adev->mode_info.underscan_property) {
6608 		dm_new_state->underscan_enable = val;
6609 		ret = 0;
6610 	} else if (property == adev->mode_info.abm_level_property) {
6611 		dm_new_state->abm_level = val;
6612 		ret = 0;
6613 	}
6614 
6615 	return ret;
6616 }
6617 
6618 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6619 					    const struct drm_connector_state *state,
6620 					    struct drm_property *property,
6621 					    uint64_t *val)
6622 {
6623 	struct drm_device *dev = connector->dev;
6624 	struct amdgpu_device *adev = drm_to_adev(dev);
6625 	struct dm_connector_state *dm_state =
6626 		to_dm_connector_state(state);
6627 	int ret = -EINVAL;
6628 
6629 	if (property == dev->mode_config.scaling_mode_property) {
6630 		switch (dm_state->scaling) {
6631 		case RMX_CENTER:
6632 			*val = DRM_MODE_SCALE_CENTER;
6633 			break;
6634 		case RMX_ASPECT:
6635 			*val = DRM_MODE_SCALE_ASPECT;
6636 			break;
6637 		case RMX_FULL:
6638 			*val = DRM_MODE_SCALE_FULLSCREEN;
6639 			break;
6640 		case RMX_OFF:
6641 		default:
6642 			*val = DRM_MODE_SCALE_NONE;
6643 			break;
6644 		}
6645 		ret = 0;
6646 	} else if (property == adev->mode_info.underscan_hborder_property) {
6647 		*val = dm_state->underscan_hborder;
6648 		ret = 0;
6649 	} else if (property == adev->mode_info.underscan_vborder_property) {
6650 		*val = dm_state->underscan_vborder;
6651 		ret = 0;
6652 	} else if (property == adev->mode_info.underscan_property) {
6653 		*val = dm_state->underscan_enable;
6654 		ret = 0;
6655 	} else if (property == adev->mode_info.abm_level_property) {
6656 		*val = dm_state->abm_level;
6657 		ret = 0;
6658 	}
6659 
6660 	return ret;
6661 }
6662 
6663 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6664 {
6665 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6666 
6667 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6668 }
6669 
6670 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6671 {
6672 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6673 	const struct dc_link *link = aconnector->dc_link;
6674 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6675 	struct amdgpu_display_manager *dm = &adev->dm;
6676 	int i;
6677 
6678 	/*
	 * Call only if mst_mgr was initialized before, since it's not done
	 * for all connector types.
6681 	 */
6682 	if (aconnector->mst_mgr.dev)
6683 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6684 
6685 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6686 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6687 	for (i = 0; i < dm->num_of_edps; i++) {
6688 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6689 			backlight_device_unregister(dm->backlight_dev[i]);
6690 			dm->backlight_dev[i] = NULL;
6691 		}
6692 	}
6693 #endif
6694 
6695 	if (aconnector->dc_em_sink)
6696 		dc_sink_release(aconnector->dc_em_sink);
6697 	aconnector->dc_em_sink = NULL;
6698 	if (aconnector->dc_sink)
6699 		dc_sink_release(aconnector->dc_sink);
6700 	aconnector->dc_sink = NULL;
6701 
6702 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6703 	drm_connector_unregister(connector);
6704 	drm_connector_cleanup(connector);
6705 	if (aconnector->i2c) {
6706 		i2c_del_adapter(&aconnector->i2c->base);
6707 		kfree(aconnector->i2c);
6708 	}
6709 	kfree(aconnector->dm_dp_aux.aux.name);
6710 
6711 	kfree(connector);
6712 }
6713 
6714 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6715 {
6716 	struct dm_connector_state *state =
6717 		to_dm_connector_state(connector->state);
6718 
6719 	if (connector->state)
6720 		__drm_atomic_helper_connector_destroy_state(connector->state);
6721 
6722 	kfree(state);
6723 
6724 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6725 
6726 	if (state) {
6727 		state->scaling = RMX_OFF;
6728 		state->underscan_enable = false;
6729 		state->underscan_hborder = 0;
6730 		state->underscan_vborder = 0;
6731 		state->base.max_requested_bpc = 8;
6732 		state->vcpi_slots = 0;
6733 		state->pbn = 0;
6734 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6735 			state->abm_level = amdgpu_dm_abm_level;
6736 
6737 		__drm_atomic_helper_connector_reset(connector, &state->base);
6738 	}
6739 }
6740 
6741 struct drm_connector_state *
6742 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6743 {
6744 	struct dm_connector_state *state =
6745 		to_dm_connector_state(connector->state);
6746 
6747 	struct dm_connector_state *new_state =
6748 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6749 
6750 	if (!new_state)
6751 		return NULL;
6752 
6753 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6754 
6755 	new_state->freesync_capable = state->freesync_capable;
6756 	new_state->abm_level = state->abm_level;
6757 	new_state->scaling = state->scaling;
6758 	new_state->underscan_enable = state->underscan_enable;
6759 	new_state->underscan_hborder = state->underscan_hborder;
6760 	new_state->underscan_vborder = state->underscan_vborder;
6761 	new_state->vcpi_slots = state->vcpi_slots;
6762 	new_state->pbn = state->pbn;
6763 	return &new_state->base;
6764 }
6765 
6766 static int
6767 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6768 {
6769 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6770 		to_amdgpu_dm_connector(connector);
6771 	int r;
6772 
6773 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6774 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6775 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6776 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6777 		if (r)
6778 			return r;
6779 	}
6780 
6781 #if defined(CONFIG_DEBUG_FS)
6782 	connector_debugfs_init(amdgpu_dm_connector);
6783 #endif
6784 
6785 	return 0;
6786 }
6787 
6788 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6789 	.reset = amdgpu_dm_connector_funcs_reset,
6790 	.detect = amdgpu_dm_connector_detect,
6791 	.fill_modes = drm_helper_probe_single_connector_modes,
6792 	.destroy = amdgpu_dm_connector_destroy,
6793 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6794 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6795 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6796 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6797 	.late_register = amdgpu_dm_connector_late_register,
6798 	.early_unregister = amdgpu_dm_connector_unregister
6799 };
6800 
6801 static int get_modes(struct drm_connector *connector)
6802 {
6803 	return amdgpu_dm_connector_get_modes(connector);
6804 }
6805 
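/*
 * Create an emulated DC sink (SIGNAL_TYPE_VIRTUAL) from the EDID blob
 * attached to the connector. Used by handle_edid_mgmt() for connectors
 * whose state is forced and which have no detected physical sink.
 */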
6806 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6807 {
6808 	struct dc_sink_init_data init_params = {
6809 			.link = aconnector->dc_link,
6810 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6811 	};
6812 	struct edid *edid;
6813 
6814 	if (!aconnector->base.edid_blob_ptr) {
6815 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6816 				aconnector->base.name);
6817 
6818 		aconnector->base.force = DRM_FORCE_OFF;
6819 		aconnector->base.override_edid = false;
6820 		return;
6821 	}
6822 
6823 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6824 
6825 	aconnector->edid = edid;
6826 
6827 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6828 		aconnector->dc_link,
6829 		(uint8_t *)edid,
6830 		(edid->extensions + 1) * EDID_LENGTH,
6831 		&init_params);
6832 
6833 	if (aconnector->base.force == DRM_FORCE_ON) {
6834 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6835 		aconnector->dc_link->local_sink :
6836 		aconnector->dc_em_sink;
6837 		dc_sink_retain(aconnector->dc_sink);
6838 	}
6839 }
6840 
6841 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6842 {
6843 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6844 
6845 	/*
	 * In case of a headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
6848 	 */
6849 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6850 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6851 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6852 	}
6853 
6854 
6855 	aconnector->base.override_edid = true;
6856 	create_eml_sink(aconnector);
6857 }
6858 
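/*
 * Build a dc_stream_state for the requested mode and validate it against DC.
 * On validation failure the stream is rebuilt with progressively lower bpc
 * (down to 6), and as a last resort the mode is retried once with YCbCr420
 * encoding forced.
 */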
6859 static struct dc_stream_state *
6860 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6861 				const struct drm_display_mode *drm_mode,
6862 				const struct dm_connector_state *dm_state,
6863 				const struct dc_stream_state *old_stream)
6864 {
6865 	struct drm_connector *connector = &aconnector->base;
6866 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6867 	struct dc_stream_state *stream;
6868 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6869 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6870 	enum dc_status dc_result = DC_OK;
6871 
6872 	do {
6873 		stream = create_stream_for_sink(aconnector, drm_mode,
6874 						dm_state, old_stream,
6875 						requested_bpc);
6876 		if (stream == NULL) {
6877 			DRM_ERROR("Failed to create stream for sink!\n");
6878 			break;
6879 		}
6880 
6881 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6882 
6883 		if (dc_result != DC_OK) {
6884 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6885 				      drm_mode->hdisplay,
6886 				      drm_mode->vdisplay,
6887 				      drm_mode->clock,
6888 				      dc_result,
6889 				      dc_status_to_str(dc_result));
6890 
6891 			dc_stream_release(stream);
6892 			stream = NULL;
6893 			requested_bpc -= 2; /* lower bpc to retry validation */
6894 		}
6895 
6896 	} while (stream == NULL && requested_bpc >= 6);
6897 
6898 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6899 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6900 
6901 		aconnector->force_yuv420_output = true;
6902 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6903 						dm_state, old_stream);
6904 		aconnector->force_yuv420_output = false;
6905 	}
6906 
6907 	return stream;
6908 }
6909 
6910 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6911 				   struct drm_display_mode *mode)
6912 {
6913 	int result = MODE_ERROR;
6914 	struct dc_sink *dc_sink;
6915 	/* TODO: Unhardcode stream count */
6916 	struct dc_stream_state *stream;
6917 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6918 
6919 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6920 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6921 		return result;
6922 
6923 	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt.
6926 	 */
6927 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6928 		!aconnector->dc_em_sink)
6929 		handle_edid_mgmt(aconnector);
6930 
6931 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6932 
6933 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6934 				aconnector->base.force != DRM_FORCE_ON) {
6935 		DRM_ERROR("dc_sink is NULL!\n");
6936 		goto fail;
6937 	}
6938 
6939 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6940 	if (stream) {
6941 		dc_stream_release(stream);
6942 		result = MODE_OK;
6943 	}
6944 
6945 fail:
	/* TODO: error handling */
6947 	return result;
6948 }
6949 
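/*
 * Pack the HDR static metadata from the connector state into a DC info
 * packet: a Dynamic Range and Mastering (DRM) infoframe for HDMI sinks, or
 * the equivalent SDP for DP/eDP sinks.
 */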
6950 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6951 				struct dc_info_packet *out)
6952 {
6953 	struct hdmi_drm_infoframe frame;
6954 	unsigned char buf[30]; /* 26 + 4 */
6955 	ssize_t len;
6956 	int ret, i;
6957 
6958 	memset(out, 0, sizeof(*out));
6959 
6960 	if (!state->hdr_output_metadata)
6961 		return 0;
6962 
6963 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6964 	if (ret)
6965 		return ret;
6966 
6967 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6968 	if (len < 0)
6969 		return (int)len;
6970 
6971 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6972 	if (len != 30)
6973 		return -EINVAL;
6974 
6975 	/* Prepare the infopacket for DC. */
6976 	switch (state->connector->connector_type) {
6977 	case DRM_MODE_CONNECTOR_HDMIA:
6978 		out->hb0 = 0x87; /* type */
6979 		out->hb1 = 0x01; /* version */
6980 		out->hb2 = 0x1A; /* length */
6981 		out->sb[0] = buf[3]; /* checksum */
6982 		i = 1;
6983 		break;
6984 
6985 	case DRM_MODE_CONNECTOR_DisplayPort:
6986 	case DRM_MODE_CONNECTOR_eDP:
6987 		out->hb0 = 0x00; /* sdp id, zero */
6988 		out->hb1 = 0x87; /* type */
6989 		out->hb2 = 0x1D; /* payload len - 1 */
6990 		out->hb3 = (0x13 << 2); /* sdp version */
6991 		out->sb[0] = 0x01; /* version */
6992 		out->sb[1] = 0x1A; /* length */
6993 		i = 2;
6994 		break;
6995 
6996 	default:
6997 		return -EINVAL;
6998 	}
6999 
7000 	memcpy(&out->sb[i], &buf[4], 26);
7001 	out->valid = true;
7002 
7003 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7004 		       sizeof(out->sb), false);
7005 
7006 	return 0;
7007 }
7008 
7009 static int
7010 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7011 				 struct drm_atomic_state *state)
7012 {
7013 	struct drm_connector_state *new_con_state =
7014 		drm_atomic_get_new_connector_state(state, conn);
7015 	struct drm_connector_state *old_con_state =
7016 		drm_atomic_get_old_connector_state(state, conn);
7017 	struct drm_crtc *crtc = new_con_state->crtc;
7018 	struct drm_crtc_state *new_crtc_state;
7019 	int ret;
7020 
7021 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7022 
7023 	if (!crtc)
7024 		return 0;
7025 
7026 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7027 		struct dc_info_packet hdr_infopacket;
7028 
7029 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7030 		if (ret)
7031 			return ret;
7032 
7033 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7034 		if (IS_ERR(new_crtc_state))
7035 			return PTR_ERR(new_crtc_state);
7036 
7037 		/*
7038 		 * DC considers the stream backends changed if the
7039 		 * static metadata changes. Forcing the modeset also
7040 		 * gives a simple way for userspace to switch from
7041 		 * 8bpc to 10bpc when setting the metadata to enter
7042 		 * or exit HDR.
7043 		 *
7044 		 * Changing the static metadata after it's been
7045 		 * set is permissible, however. So only force a
7046 		 * modeset if we're entering or exiting HDR.
7047 		 */
7048 		new_crtc_state->mode_changed =
7049 			!old_con_state->hdr_output_metadata ||
7050 			!new_con_state->hdr_output_metadata;
7051 	}
7052 
7053 	return 0;
7054 }
7055 
7056 static const struct drm_connector_helper_funcs
7057 amdgpu_dm_connector_helper_funcs = {
7058 	/*
7059 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7060 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7061 	 * are missing after user start lightdm. So we need to renew modes list.
7062 	 * in get_modes call back, not just return the modes count
7063 	 */
7064 	.get_modes = get_modes,
7065 	.mode_valid = amdgpu_dm_connector_mode_valid,
7066 	.atomic_check = amdgpu_dm_connector_atomic_check,
7067 };
7068 
7069 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7070 {
7071 }
7072 
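/*
 * Count the planes that will be enabled on the CRTC after this commit.
 * Cursor planes are skipped since they are treated as "fake" planes here.
 */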
7073 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7074 {
7075 	struct drm_atomic_state *state = new_crtc_state->state;
7076 	struct drm_plane *plane;
7077 	int num_active = 0;
7078 
7079 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7080 		struct drm_plane_state *new_plane_state;
7081 
7082 		/* Cursor planes are "fake". */
7083 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7084 			continue;
7085 
7086 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7087 
7088 		if (!new_plane_state) {
7089 			/*
			 * The plane is enabled on the CRTC and hasn't changed
7091 			 * state. This means that it previously passed
7092 			 * validation and is therefore enabled.
7093 			 */
7094 			num_active += 1;
7095 			continue;
7096 		}
7097 
7098 		/* We need a framebuffer to be considered enabled. */
7099 		num_active += (new_plane_state->fb != NULL);
7100 	}
7101 
7102 	return num_active;
7103 }
7104 
7105 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7106 					 struct drm_crtc_state *new_crtc_state)
7107 {
7108 	struct dm_crtc_state *dm_new_crtc_state =
7109 		to_dm_crtc_state(new_crtc_state);
7110 
7111 	dm_new_crtc_state->active_planes = 0;
7112 
7113 	if (!dm_new_crtc_state->stream)
7114 		return;
7115 
7116 	dm_new_crtc_state->active_planes =
7117 		count_crtc_active_planes(new_crtc_state);
7118 }
7119 
7120 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7121 				       struct drm_atomic_state *state)
7122 {
7123 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7124 									  crtc);
7125 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7126 	struct dc *dc = adev->dm.dc;
7127 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7128 	int ret = -EINVAL;
7129 
7130 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7131 
7132 	dm_update_crtc_active_planes(crtc, crtc_state);
7133 
7134 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7135 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7136 		return ret;
7137 	}
7138 
7139 	/*
7140 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7141 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7142 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7143 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7144 	 */
7145 	if (crtc_state->enable &&
7146 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7147 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7148 		return -EINVAL;
7149 	}
7150 
7151 	/* In some use cases, like reset, no stream is attached */
7152 	if (!dm_crtc_state->stream)
7153 		return 0;
7154 
7155 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7156 		return 0;
7157 
7158 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7159 	return ret;
7160 }
7161 
7162 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7163 				      const struct drm_display_mode *mode,
7164 				      struct drm_display_mode *adjusted_mode)
7165 {
7166 	return true;
7167 }
7168 
7169 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7170 	.disable = dm_crtc_helper_disable,
7171 	.atomic_check = dm_crtc_helper_atomic_check,
7172 	.mode_fixup = dm_crtc_helper_mode_fixup,
7173 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7174 };
7175 
7176 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7177 {
7178 
7179 }
7180 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7201 
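/*
 * For MST connectors: derive the PBN (payload bandwidth number) from the
 * adjusted mode clock and the bpp implied by the connector's color depth,
 * then reserve VCPI time slots on the topology manager using the link's
 * PBN-per-slot divider.
 */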
7202 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7203 					  struct drm_crtc_state *crtc_state,
7204 					  struct drm_connector_state *conn_state)
7205 {
7206 	struct drm_atomic_state *state = crtc_state->state;
7207 	struct drm_connector *connector = conn_state->connector;
7208 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7209 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7210 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7211 	struct drm_dp_mst_topology_mgr *mst_mgr;
7212 	struct drm_dp_mst_port *mst_port;
7213 	enum dc_color_depth color_depth;
7214 	int clock, bpp = 0;
7215 	bool is_y420 = false;
7216 
7217 	if (!aconnector->port || !aconnector->dc_sink)
7218 		return 0;
7219 
7220 	mst_port = aconnector->port;
7221 	mst_mgr = &aconnector->mst_port->mst_mgr;
7222 
7223 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7224 		return 0;
7225 
7226 	if (!state->duplicated) {
7227 		int max_bpc = conn_state->max_requested_bpc;
7228 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7229 				aconnector->force_yuv420_output;
7230 		color_depth = convert_color_depth_from_display_info(connector,
7231 								    is_y420,
7232 								    max_bpc);
7233 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7234 		clock = adjusted_mode->clock;
7235 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7236 	}
7237 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7238 									   mst_mgr,
7239 									   mst_port,
7240 									   dm_new_connector_state->pbn,
7241 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7242 	if (dm_new_connector_state->vcpi_slots < 0) {
7243 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7244 		return dm_new_connector_state->vcpi_slots;
7245 	}
7246 	return 0;
7247 }
7248 
7249 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7250 	.disable = dm_encoder_helper_disable,
7251 	.atomic_check = dm_encoder_helper_atomic_check
7252 };
7253 
7254 #if defined(CONFIG_DRM_AMD_DC_DCN)
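/*
 * Walk the new connector states and, for each MST stream, allocate VCPI
 * slots from the PBN computed by compute_mst_dsc_configs_for_state(),
 * enabling DSC on the MST port when the stream's timing has DSC set and
 * disabling it otherwise.
 */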
7255 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7256 					    struct dc_state *dc_state,
7257 					    struct dsc_mst_fairness_vars *vars)
7258 {
7259 	struct dc_stream_state *stream = NULL;
7260 	struct drm_connector *connector;
7261 	struct drm_connector_state *new_con_state;
7262 	struct amdgpu_dm_connector *aconnector;
7263 	struct dm_connector_state *dm_conn_state;
7264 	int i, j;
7265 	int vcpi, pbn_div, pbn, slot_num = 0;
7266 
7267 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7268 
7269 		aconnector = to_amdgpu_dm_connector(connector);
7270 
7271 		if (!aconnector->port)
7272 			continue;
7273 
7274 		if (!new_con_state || !new_con_state->crtc)
7275 			continue;
7276 
7277 		dm_conn_state = to_dm_connector_state(new_con_state);
7278 
7279 		for (j = 0; j < dc_state->stream_count; j++) {
7280 			stream = dc_state->streams[j];
7281 			if (!stream)
7282 				continue;
7283 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7285 				break;
7286 
7287 			stream = NULL;
7288 		}
7289 
7290 		if (!stream)
7291 			continue;
7292 
7293 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7295 		for (j = 0; j < dc_state->stream_count; j++) {
7296 			if (vars[j].aconnector == aconnector) {
7297 				pbn = vars[j].pbn;
7298 				break;
7299 			}
7300 		}
7301 
7302 		if (j == dc_state->stream_count)
7303 			continue;
7304 
7305 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7306 
7307 		if (stream->timing.flags.DSC != 1) {
7308 			dm_conn_state->pbn = pbn;
7309 			dm_conn_state->vcpi_slots = slot_num;
7310 
7311 			drm_dp_mst_atomic_enable_dsc(state,
7312 						     aconnector->port,
7313 						     dm_conn_state->pbn,
7314 						     0,
7315 						     false);
7316 			continue;
7317 		}
7318 
7319 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7320 						    aconnector->port,
7321 						    pbn, pbn_div,
7322 						    true);
7323 		if (vcpi < 0)
7324 			return vcpi;
7325 
7326 		dm_conn_state->pbn = pbn;
7327 		dm_conn_state->vcpi_slots = vcpi;
7328 	}
7329 	return 0;
7330 }
7331 #endif
7332 
7333 static void dm_drm_plane_reset(struct drm_plane *plane)
7334 {
7335 	struct dm_plane_state *amdgpu_state = NULL;
7336 
7337 	if (plane->state)
7338 		plane->funcs->atomic_destroy_state(plane, plane->state);
7339 
7340 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7341 	WARN_ON(amdgpu_state == NULL);
7342 
7343 	if (amdgpu_state)
7344 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7345 }
7346 
7347 static struct drm_plane_state *
7348 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7349 {
7350 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7351 
7352 	old_dm_plane_state = to_dm_plane_state(plane->state);
7353 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7354 	if (!dm_plane_state)
7355 		return NULL;
7356 
7357 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7358 
7359 	if (old_dm_plane_state->dc_state) {
7360 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7361 		dc_plane_state_retain(dm_plane_state->dc_state);
7362 	}
7363 
7364 	return &dm_plane_state->base;
7365 }
7366 
7367 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7368 				struct drm_plane_state *state)
7369 {
7370 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7371 
7372 	if (dm_plane_state->dc_state)
7373 		dc_plane_state_release(dm_plane_state->dc_state);
7374 
7375 	drm_atomic_helper_plane_destroy_state(plane, state);
7376 }
7377 
7378 static const struct drm_plane_funcs dm_plane_funcs = {
7379 	.update_plane	= drm_atomic_helper_update_plane,
7380 	.disable_plane	= drm_atomic_helper_disable_plane,
7381 	.destroy	= drm_primary_helper_destroy,
7382 	.reset = dm_drm_plane_reset,
7383 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7384 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7385 	.format_mod_supported = dm_plane_format_mod_supported,
7386 };
7387 
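/*
 * Pin the framebuffer BO into a scanout-capable domain, make sure it has a
 * GART mapping, and record the resulting GPU address so DC can program the
 * surface at commit time.
 */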
7388 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7389 				      struct drm_plane_state *new_state)
7390 {
7391 	struct amdgpu_framebuffer *afb;
7392 	struct drm_gem_object *obj;
7393 	struct amdgpu_device *adev;
7394 	struct amdgpu_bo *rbo;
7395 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7396 	struct list_head list;
7397 	struct ttm_validate_buffer tv;
7398 	struct ww_acquire_ctx ticket;
7399 	uint32_t domain;
7400 	int r;
7401 
7402 	if (!new_state->fb) {
7403 		DRM_DEBUG_KMS("No FB bound\n");
7404 		return 0;
7405 	}
7406 
7407 	afb = to_amdgpu_framebuffer(new_state->fb);
7408 	obj = new_state->fb->obj[0];
7409 	rbo = gem_to_amdgpu_bo(obj);
7410 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7411 	INIT_LIST_HEAD(&list);
7412 
7413 	tv.bo = &rbo->tbo;
7414 	tv.num_shared = 1;
7415 	list_add(&tv.head, &list);
7416 
7417 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7418 	if (r) {
7419 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7420 		return r;
7421 	}
7422 
7423 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7424 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7425 	else
7426 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7427 
7428 	r = amdgpu_bo_pin(rbo, domain);
7429 	if (unlikely(r != 0)) {
7430 		if (r != -ERESTARTSYS)
7431 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7432 		ttm_eu_backoff_reservation(&ticket, &list);
7433 		return r;
7434 	}
7435 
7436 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7437 	if (unlikely(r != 0)) {
7438 		amdgpu_bo_unpin(rbo);
7439 		ttm_eu_backoff_reservation(&ticket, &list);
7440 		DRM_ERROR("%p bind failed\n", rbo);
7441 		return r;
7442 	}
7443 
7444 	ttm_eu_backoff_reservation(&ticket, &list);
7445 
7446 	afb->address = amdgpu_bo_gpu_offset(rbo);
7447 
7448 	amdgpu_bo_ref(rbo);
7449 
7450 	/**
7451 	 * We don't do surface updates on planes that have been newly created,
7452 	 * but we also don't have the afb->address during atomic check.
7453 	 *
7454 	 * Fill in buffer attributes depending on the address here, but only on
7455 	 * newly created planes since they're not being used by DC yet and this
7456 	 * won't modify global state.
7457 	 */
7458 	dm_plane_state_old = to_dm_plane_state(plane->state);
7459 	dm_plane_state_new = to_dm_plane_state(new_state);
7460 
7461 	if (dm_plane_state_new->dc_state &&
7462 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7463 		struct dc_plane_state *plane_state =
7464 			dm_plane_state_new->dc_state;
7465 		bool force_disable_dcc = !plane_state->dcc.enable;
7466 
7467 		fill_plane_buffer_attributes(
7468 			adev, afb, plane_state->format, plane_state->rotation,
7469 			afb->tiling_flags,
7470 			&plane_state->tiling_info, &plane_state->plane_size,
7471 			&plane_state->dcc, &plane_state->address,
7472 			afb->tmz_surface, force_disable_dcc);
7473 	}
7474 
7475 	return 0;
7476 }
7477 
7478 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7479 				       struct drm_plane_state *old_state)
7480 {
7481 	struct amdgpu_bo *rbo;
7482 	int r;
7483 
7484 	if (!old_state->fb)
7485 		return;
7486 
7487 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7488 	r = amdgpu_bo_reserve(rbo, false);
7489 	if (unlikely(r)) {
7490 		DRM_ERROR("failed to reserve rbo before unpin\n");
7491 		return;
7492 	}
7493 
7494 	amdgpu_bo_unpin(rbo);
7495 	amdgpu_bo_unreserve(rbo);
7496 	amdgpu_bo_unref(&rbo);
7497 }
7498 
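/*
 * Validate the plane's viewport against the CRTC and clamp the allowed
 * scaling range to the DC plane caps, converted to the DRM 16.16
 * fixed-point src/dst convention.
 */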
7499 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7500 				       struct drm_crtc_state *new_crtc_state)
7501 {
7502 	struct drm_framebuffer *fb = state->fb;
7503 	int min_downscale, max_upscale;
7504 	int min_scale = 0;
7505 	int max_scale = INT_MAX;
7506 
7507 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7508 	if (fb && state->crtc) {
7509 		/* Validate viewport to cover the case when only the position changes */
7510 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7511 			int viewport_width = state->crtc_w;
7512 			int viewport_height = state->crtc_h;
7513 
7514 			if (state->crtc_x < 0)
7515 				viewport_width += state->crtc_x;
7516 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7517 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7518 
7519 			if (state->crtc_y < 0)
7520 				viewport_height += state->crtc_y;
7521 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7522 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7523 
7524 			if (viewport_width < 0 || viewport_height < 0) {
7525 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7526 				return -EINVAL;
7527 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7528 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7529 				return -EINVAL;
7530 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7531 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7532 				return -EINVAL;
7533 			}
7534 
7535 		}
7536 
7537 		/* Get min/max allowed scaling factors from plane caps. */
7538 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7539 					     &min_downscale, &max_upscale);
7540 		/*
7541 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7542 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7543 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7544 		 */
7545 		min_scale = (1000 << 16) / max_upscale;
7546 		max_scale = (1000 << 16) / min_downscale;
7547 	}
7548 
7549 	return drm_atomic_helper_check_plane_state(
7550 		state, new_crtc_state, min_scale, max_scale, true, true);
7551 }
7552 
7553 static int dm_plane_atomic_check(struct drm_plane *plane,
7554 				 struct drm_atomic_state *state)
7555 {
7556 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7557 										 plane);
7558 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7559 	struct dc *dc = adev->dm.dc;
7560 	struct dm_plane_state *dm_plane_state;
7561 	struct dc_scaling_info scaling_info;
7562 	struct drm_crtc_state *new_crtc_state;
7563 	int ret;
7564 
7565 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7566 
7567 	dm_plane_state = to_dm_plane_state(new_plane_state);
7568 
7569 	if (!dm_plane_state->dc_state)
7570 		return 0;
7571 
7572 	new_crtc_state =
7573 		drm_atomic_get_new_crtc_state(state,
7574 					      new_plane_state->crtc);
7575 	if (!new_crtc_state)
7576 		return -EINVAL;
7577 
7578 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7579 	if (ret)
7580 		return ret;
7581 
7582 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7583 	if (ret)
7584 		return ret;
7585 
7586 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7587 		return 0;
7588 
7589 	return -EINVAL;
7590 }
7591 
7592 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7593 				       struct drm_atomic_state *state)
7594 {
7595 	/* Only support async updates on cursor planes. */
7596 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7597 		return -EINVAL;
7598 
7599 	return 0;
7600 }
7601 
7602 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7603 					 struct drm_atomic_state *state)
7604 {
7605 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7606 									   plane);
7607 	struct drm_plane_state *old_state =
7608 		drm_atomic_get_old_plane_state(state, plane);
7609 
7610 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7611 
7612 	swap(plane->state->fb, new_state->fb);
7613 
7614 	plane->state->src_x = new_state->src_x;
7615 	plane->state->src_y = new_state->src_y;
7616 	plane->state->src_w = new_state->src_w;
7617 	plane->state->src_h = new_state->src_h;
7618 	plane->state->crtc_x = new_state->crtc_x;
7619 	plane->state->crtc_y = new_state->crtc_y;
7620 	plane->state->crtc_w = new_state->crtc_w;
7621 	plane->state->crtc_h = new_state->crtc_h;
7622 
7623 	handle_cursor_update(plane, old_state);
7624 }
7625 
7626 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7627 	.prepare_fb = dm_plane_helper_prepare_fb,
7628 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7629 	.atomic_check = dm_plane_atomic_check,
7630 	.atomic_async_check = dm_plane_atomic_async_check,
7631 	.atomic_async_update = dm_plane_atomic_async_update
7632 };
7633 
7634 /*
7635  * TODO: these are currently initialized to rgb formats only.
7636  * For future use cases we should either initialize them dynamically based on
7637  * plane capabilities, or initialize this array to all formats, so internal drm
7638  * check will succeed, and let DC implement proper check
7639  */
7640 static const uint32_t rgb_formats[] = {
7641 	DRM_FORMAT_XRGB8888,
7642 	DRM_FORMAT_ARGB8888,
7643 	DRM_FORMAT_RGBA8888,
7644 	DRM_FORMAT_XRGB2101010,
7645 	DRM_FORMAT_XBGR2101010,
7646 	DRM_FORMAT_ARGB2101010,
7647 	DRM_FORMAT_ABGR2101010,
7648 	DRM_FORMAT_XRGB16161616,
7649 	DRM_FORMAT_XBGR16161616,
7650 	DRM_FORMAT_ARGB16161616,
7651 	DRM_FORMAT_ABGR16161616,
7652 	DRM_FORMAT_XBGR8888,
7653 	DRM_FORMAT_ABGR8888,
7654 	DRM_FORMAT_RGB565,
7655 };
7656 
7657 static const uint32_t overlay_formats[] = {
7658 	DRM_FORMAT_XRGB8888,
7659 	DRM_FORMAT_ARGB8888,
7660 	DRM_FORMAT_RGBA8888,
7661 	DRM_FORMAT_XBGR8888,
7662 	DRM_FORMAT_ABGR8888,
7663 	DRM_FORMAT_RGB565
7664 };
7665 
7666 static const u32 cursor_formats[] = {
7667 	DRM_FORMAT_ARGB8888
7668 };
7669 
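/*
 * Fill @formats with the pixel formats supported by the given plane type,
 * extending the static RGB list with NV12/P010/FP16 on primary planes when
 * the DC plane caps advertise support for them.
 */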
7670 static int get_plane_formats(const struct drm_plane *plane,
7671 			     const struct dc_plane_cap *plane_cap,
7672 			     uint32_t *formats, int max_formats)
7673 {
7674 	int i, num_formats = 0;
7675 
7676 	/*
7677 	 * TODO: Query support for each group of formats directly from
7678 	 * DC plane caps. This will require adding more formats to the
7679 	 * caps list.
7680 	 */
7681 
7682 	switch (plane->type) {
7683 	case DRM_PLANE_TYPE_PRIMARY:
7684 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7685 			if (num_formats >= max_formats)
7686 				break;
7687 
7688 			formats[num_formats++] = rgb_formats[i];
7689 		}
7690 
7691 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7692 			formats[num_formats++] = DRM_FORMAT_NV12;
7693 		if (plane_cap && plane_cap->pixel_format_support.p010)
7694 			formats[num_formats++] = DRM_FORMAT_P010;
7695 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7696 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7697 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7698 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7699 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7700 		}
7701 		break;
7702 
7703 	case DRM_PLANE_TYPE_OVERLAY:
7704 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7705 			if (num_formats >= max_formats)
7706 				break;
7707 
7708 			formats[num_formats++] = overlay_formats[i];
7709 		}
7710 		break;
7711 
7712 	case DRM_PLANE_TYPE_CURSOR:
7713 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7714 			if (num_formats >= max_formats)
7715 				break;
7716 
7717 			formats[num_formats++] = cursor_formats[i];
7718 		}
7719 		break;
7720 	}
7721 
7722 	return num_formats;
7723 }
7724 
7725 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7726 				struct drm_plane *plane,
7727 				unsigned long possible_crtcs,
7728 				const struct dc_plane_cap *plane_cap)
7729 {
7730 	uint32_t formats[32];
7731 	int num_formats;
7732 	int res = -EPERM;
7733 	unsigned int supported_rotations;
7734 	uint64_t *modifiers = NULL;
7735 
7736 	num_formats = get_plane_formats(plane, plane_cap, formats,
7737 					ARRAY_SIZE(formats));
7738 
7739 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7740 	if (res)
7741 		return res;
7742 
7743 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7744 				       &dm_plane_funcs, formats, num_formats,
7745 				       modifiers, plane->type, NULL);
7746 	kfree(modifiers);
7747 	if (res)
7748 		return res;
7749 
7750 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7751 	    plane_cap && plane_cap->per_pixel_alpha) {
7752 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7753 					  BIT(DRM_MODE_BLEND_PREMULTI);
7754 
7755 		drm_plane_create_alpha_property(plane);
7756 		drm_plane_create_blend_mode_property(plane, blend_caps);
7757 	}
7758 
7759 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7760 	    plane_cap &&
7761 	    (plane_cap->pixel_format_support.nv12 ||
7762 	     plane_cap->pixel_format_support.p010)) {
7763 		/* This only affects YUV formats. */
7764 		drm_plane_create_color_properties(
7765 			plane,
7766 			BIT(DRM_COLOR_YCBCR_BT601) |
7767 			BIT(DRM_COLOR_YCBCR_BT709) |
7768 			BIT(DRM_COLOR_YCBCR_BT2020),
7769 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7770 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7771 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7772 	}
7773 
7774 	supported_rotations =
7775 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7776 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7777 
7778 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7779 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7780 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7781 						   supported_rotations);
7782 
7783 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7784 
7785 	/* Create (reset) the plane state */
7786 	if (plane->funcs->reset)
7787 		plane->funcs->reset(plane);
7788 
7789 	return 0;
7790 }
7791 
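/*
 * Allocate an amdgpu_crtc together with a dedicated cursor plane, register
 * both with DRM, and attach the DM CRTC helpers and color management
 * (degamma/CTM/gamma) properties.
 */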
7792 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7793 			       struct drm_plane *plane,
7794 			       uint32_t crtc_index)
7795 {
7796 	struct amdgpu_crtc *acrtc = NULL;
7797 	struct drm_plane *cursor_plane;
7798 
7799 	int res = -ENOMEM;
7800 
7801 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7802 	if (!cursor_plane)
7803 		goto fail;
7804 
7805 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7806 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7807 
7808 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7809 	if (!acrtc)
7810 		goto fail;
7811 
7812 	res = drm_crtc_init_with_planes(
7813 			dm->ddev,
7814 			&acrtc->base,
7815 			plane,
7816 			cursor_plane,
7817 			&amdgpu_dm_crtc_funcs, NULL);
7818 
7819 	if (res)
7820 		goto fail;
7821 
7822 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7823 
	/* Create (reset) the CRTC state */
7825 	if (acrtc->base.funcs->reset)
7826 		acrtc->base.funcs->reset(&acrtc->base);
7827 
7828 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7829 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7830 
7831 	acrtc->crtc_id = crtc_index;
7832 	acrtc->base.enabled = false;
7833 	acrtc->otg_inst = -1;
7834 
7835 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7836 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7837 				   true, MAX_COLOR_LUT_ENTRIES);
7838 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7839 
7840 	return 0;
7841 
7842 fail:
7843 	kfree(acrtc);
7844 	kfree(cursor_plane);
7845 	return res;
7846 }
7847 
7848 
7849 static int to_drm_connector_type(enum signal_type st)
7850 {
7851 	switch (st) {
7852 	case SIGNAL_TYPE_HDMI_TYPE_A:
7853 		return DRM_MODE_CONNECTOR_HDMIA;
7854 	case SIGNAL_TYPE_EDP:
7855 		return DRM_MODE_CONNECTOR_eDP;
7856 	case SIGNAL_TYPE_LVDS:
7857 		return DRM_MODE_CONNECTOR_LVDS;
7858 	case SIGNAL_TYPE_RGB:
7859 		return DRM_MODE_CONNECTOR_VGA;
7860 	case SIGNAL_TYPE_DISPLAY_PORT:
7861 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7862 		return DRM_MODE_CONNECTOR_DisplayPort;
7863 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7864 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7865 		return DRM_MODE_CONNECTOR_DVID;
7866 	case SIGNAL_TYPE_VIRTUAL:
7867 		return DRM_MODE_CONNECTOR_VIRTUAL;
7868 
7869 	default:
7870 		return DRM_MODE_CONNECTOR_Unknown;
7871 	}
7872 }
7873 
7874 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7875 {
7876 	struct drm_encoder *encoder;
7877 
7878 	/* There is only one encoder per connector */
7879 	drm_connector_for_each_possible_encoder(connector, encoder)
7880 		return encoder;
7881 
7882 	return NULL;
7883 }
7884 
7885 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7886 {
7887 	struct drm_encoder *encoder;
7888 	struct amdgpu_encoder *amdgpu_encoder;
7889 
7890 	encoder = amdgpu_dm_connector_to_encoder(connector);
7891 
7892 	if (encoder == NULL)
7893 		return;
7894 
7895 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7896 
7897 	amdgpu_encoder->native_mode.clock = 0;
7898 
7899 	if (!list_empty(&connector->probed_modes)) {
7900 		struct drm_display_mode *preferred_mode = NULL;
7901 
7902 		list_for_each_entry(preferred_mode,
7903 				    &connector->probed_modes,
7904 				    head) {
7905 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7906 				amdgpu_encoder->native_mode = *preferred_mode;
7907 
7908 			break;
7909 		}
7910 
7911 	}
7912 }
7913 
7914 static struct drm_display_mode *
7915 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7916 			     char *name,
7917 			     int hdisplay, int vdisplay)
7918 {
7919 	struct drm_device *dev = encoder->dev;
7920 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7921 	struct drm_display_mode *mode = NULL;
7922 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7923 
7924 	mode = drm_mode_duplicate(dev, native_mode);
7925 
7926 	if (mode == NULL)
7927 		return NULL;
7928 
7929 	mode->hdisplay = hdisplay;
7930 	mode->vdisplay = vdisplay;
7931 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7932 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7933 
	return mode;
}
7937 
7938 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7939 						 struct drm_connector *connector)
7940 {
7941 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7942 	struct drm_display_mode *mode = NULL;
7943 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7944 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7945 				to_amdgpu_dm_connector(connector);
7946 	int i;
7947 	int n;
7948 	struct mode_size {
7949 		char name[DRM_DISPLAY_MODE_LEN];
7950 		int w;
7951 		int h;
7952 	} common_modes[] = {
7953 		{  "640x480",  640,  480},
7954 		{  "800x600",  800,  600},
7955 		{ "1024x768", 1024,  768},
7956 		{ "1280x720", 1280,  720},
7957 		{ "1280x800", 1280,  800},
7958 		{"1280x1024", 1280, 1024},
7959 		{ "1440x900", 1440,  900},
7960 		{"1680x1050", 1680, 1050},
7961 		{"1600x1200", 1600, 1200},
7962 		{"1920x1080", 1920, 1080},
7963 		{"1920x1200", 1920, 1200}
7964 	};
7965 
7966 	n = ARRAY_SIZE(common_modes);
7967 
7968 	for (i = 0; i < n; i++) {
7969 		struct drm_display_mode *curmode = NULL;
7970 		bool mode_existed = false;
7971 
7972 		if (common_modes[i].w > native_mode->hdisplay ||
7973 		    common_modes[i].h > native_mode->vdisplay ||
7974 		   (common_modes[i].w == native_mode->hdisplay &&
7975 		    common_modes[i].h == native_mode->vdisplay))
7976 			continue;
7977 
7978 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7979 			if (common_modes[i].w == curmode->hdisplay &&
7980 			    common_modes[i].h == curmode->vdisplay) {
7981 				mode_existed = true;
7982 				break;
7983 			}
7984 		}
7985 
7986 		if (mode_existed)
7987 			continue;
7988 
7989 		mode = amdgpu_dm_create_common_mode(encoder,
7990 				common_modes[i].name, common_modes[i].w,
7991 				common_modes[i].h);
7992 		drm_mode_probed_add(connector, mode);
7993 		amdgpu_dm_connector->num_modes++;
7994 	}
7995 }
7996 
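/*
 * For internal panels (eDP/LVDS), look up any panel-orientation quirk for
 * the native mode dimensions and expose it via the DRM panel orientation
 * property.
 */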
7997 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7998 {
7999 	struct drm_encoder *encoder;
8000 	struct amdgpu_encoder *amdgpu_encoder;
8001 	const struct drm_display_mode *native_mode;
8002 
8003 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8004 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8005 		return;
8006 
8007 	encoder = amdgpu_dm_connector_to_encoder(connector);
8008 	if (!encoder)
8009 		return;
8010 
8011 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8012 
8013 	native_mode = &amdgpu_encoder->native_mode;
8014 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8015 		return;
8016 
8017 	drm_connector_set_panel_orientation_with_quirk(connector,
8018 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8019 						       native_mode->hdisplay,
8020 						       native_mode->vdisplay);
8021 }
8022 
8023 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8024 					      struct edid *edid)
8025 {
8026 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8027 			to_amdgpu_dm_connector(connector);
8028 
8029 	if (edid) {
8030 		/* empty probed_modes */
8031 		INIT_LIST_HEAD(&connector->probed_modes);
8032 		amdgpu_dm_connector->num_modes =
8033 				drm_add_edid_modes(connector, edid);
8034 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. A mode later in the
		 * probed list could be of a higher, preferred resolution;
		 * for example, 3840x2160 as the base EDID preferred
		 * timing and 4096x2160 as the preferred resolution in a
		 * DisplayID extension block.
		 */
8043 		drm_mode_sort(&connector->probed_modes);
8044 		amdgpu_dm_get_native_mode(connector);
8045 
8046 		/* Freesync capabilities are reset by calling
8047 		 * drm_add_edid_modes() and need to be
8048 		 * restored here.
8049 		 */
8050 		amdgpu_dm_update_freesync_caps(connector, edid);
8051 
8052 		amdgpu_set_panel_orientation(connector);
8053 	} else {
8054 		amdgpu_dm_connector->num_modes = 0;
8055 	}
8056 }
8057 
8058 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8059 			      struct drm_display_mode *mode)
8060 {
8061 	struct drm_display_mode *m;
8062 
	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8064 		if (drm_mode_equal(m, mode))
8065 			return true;
8066 	}
8067 
8068 	return false;
8069 }
8070 
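/*
 * Synthesize fixed-refresh modes at common video rates within the
 * connector's FreeSync range by stretching the vtotal of the highest
 * refresh rate mode. Returns the number of modes added.
 */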
8071 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8072 {
8073 	const struct drm_display_mode *m;
8074 	struct drm_display_mode *new_mode;
8075 	uint i;
8076 	uint32_t new_modes_count = 0;
8077 
8078 	/* Standard FPS values
8079 	 *
8080 	 * 23.976       - TV/NTSC
8081 	 * 24 	        - Cinema
8082 	 * 25 	        - TV/PAL
8083 	 * 29.97        - TV/NTSC
8084 	 * 30 	        - TV/NTSC
8085 	 * 48 	        - Cinema HFR
8086 	 * 50 	        - TV/PAL
8087 	 * 60 	        - Commonly used
8088 	 * 48,72,96,120 - Multiples of 24
8089 	 */
8090 	static const uint32_t common_rates[] = {
8091 		23976, 24000, 25000, 29970, 30000,
8092 		48000, 50000, 60000, 72000, 96000, 120000
8093 	};
8094 
8095 	/*
8096 	 * Find mode with highest refresh rate with the same resolution
8097 	 * as the preferred mode. Some monitors report a preferred mode
8098 	 * with lower resolution than the highest refresh rate supported.
8099 	 */
8100 
8101 	m = get_highest_refresh_rate_mode(aconnector, true);
8102 	if (!m)
8103 		return 0;
8104 
8105 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8106 		uint64_t target_vtotal, target_vtotal_diff;
8107 		uint64_t num, den;
8108 
8109 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8110 			continue;
8111 
8112 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8113 		    common_rates[i] > aconnector->max_vfreq * 1000)
8114 			continue;
8115 
8116 		num = (unsigned long long)m->clock * 1000 * 1000;
8117 		den = common_rates[i] * (unsigned long long)m->htotal;
8118 		target_vtotal = div_u64(num, den);
8119 		target_vtotal_diff = target_vtotal - m->vtotal;
8120 
8121 		/* Check for illegal modes */
8122 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8123 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8124 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8125 			continue;
8126 
8127 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8128 		if (!new_mode)
8129 			goto out;
8130 
8131 		new_mode->vtotal += (u16)target_vtotal_diff;
8132 		new_mode->vsync_start += (u16)target_vtotal_diff;
8133 		new_mode->vsync_end += (u16)target_vtotal_diff;
8134 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8135 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8136 
8137 		if (!is_duplicate_mode(aconnector, new_mode)) {
8138 			drm_mode_probed_add(&aconnector->base, new_mode);
8139 			new_modes_count += 1;
8140 		} else
8141 			drm_mode_destroy(aconnector->base.dev, new_mode);
8142 	}
8143  out:
8144 	return new_modes_count;
8145 }
8146 
8147 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8148 						   struct edid *edid)
8149 {
8150 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8151 		to_amdgpu_dm_connector(connector);
8152 
8153 	if (!(amdgpu_freesync_vid_mode && edid))
8154 		return;
8155 
8156 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8157 		amdgpu_dm_connector->num_modes +=
8158 			add_fs_modes(amdgpu_dm_connector);
8159 }
8160 
8161 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8162 {
8163 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8164 			to_amdgpu_dm_connector(connector);
8165 	struct drm_encoder *encoder;
8166 	struct edid *edid = amdgpu_dm_connector->edid;
8167 
8168 	encoder = amdgpu_dm_connector_to_encoder(connector);
8169 
8170 	if (!drm_edid_is_valid(edid)) {
8171 		amdgpu_dm_connector->num_modes =
8172 				drm_add_modes_noedid(connector, 640, 480);
8173 	} else {
8174 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8175 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8176 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8177 	}
8178 	amdgpu_dm_fbc_init(connector);
8179 
8180 	return amdgpu_dm_connector->num_modes;
8181 }
8182 
8183 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8184 				     struct amdgpu_dm_connector *aconnector,
8185 				     int connector_type,
8186 				     struct dc_link *link,
8187 				     int link_index)
8188 {
8189 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8190 
8191 	/*
8192 	 * Some of the properties below require access to state, like bpc.
8193 	 * Allocate some default initial connector state with our reset helper.
8194 	 */
8195 	if (aconnector->base.funcs->reset)
8196 		aconnector->base.funcs->reset(&aconnector->base);
8197 
8198 	aconnector->connector_id = link_index;
8199 	aconnector->dc_link = link;
8200 	aconnector->base.interlace_allowed = false;
8201 	aconnector->base.doublescan_allowed = false;
8202 	aconnector->base.stereo_allowed = false;
8203 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8204 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8205 	aconnector->audio_inst = -1;
8206 	mutex_init(&aconnector->hpd_lock);
8207 
8208 	/*
8209 	 * configure support HPD hot plug connector_>polled default value is 0
8210 	 * which means HPD hot plug not supported
8211 	 */
8212 	switch (connector_type) {
8213 	case DRM_MODE_CONNECTOR_HDMIA:
8214 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8215 		aconnector->base.ycbcr_420_allowed =
8216 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8217 		break;
8218 	case DRM_MODE_CONNECTOR_DisplayPort:
8219 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8220 		if (link->is_dig_mapping_flexible &&
8221 		    link->dc->res_pool->funcs->link_encs_assign) {
8222 			link->link_enc =
8223 				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8224 			if (!link->link_enc)
8225 				link->link_enc =
8226 					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8227 		}
8228 
8229 		if (link->link_enc)
8230 			aconnector->base.ycbcr_420_allowed =
8231 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8232 		break;
8233 	case DRM_MODE_CONNECTOR_DVID:
8234 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8235 		break;
8236 	default:
8237 		break;
8238 	}
8239 
8240 	drm_object_attach_property(&aconnector->base.base,
8241 				dm->ddev->mode_config.scaling_mode_property,
8242 				DRM_MODE_SCALE_NONE);
8243 
8244 	drm_object_attach_property(&aconnector->base.base,
8245 				adev->mode_info.underscan_property,
8246 				UNDERSCAN_OFF);
8247 	drm_object_attach_property(&aconnector->base.base,
8248 				adev->mode_info.underscan_hborder_property,
8249 				0);
8250 	drm_object_attach_property(&aconnector->base.base,
8251 				adev->mode_info.underscan_vborder_property,
8252 				0);
8253 
8254 	if (!aconnector->mst_port)
8255 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8256 
	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
8258 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8259 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8260 
8261 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8262 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8263 		drm_object_attach_property(&aconnector->base.base,
8264 				adev->mode_info.abm_level_property, 0);
8265 	}
8266 
8267 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8268 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8269 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8270 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8271 
8272 		if (!aconnector->mst_port)
8273 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8274 
8275 #ifdef CONFIG_DRM_AMD_DC_HDCP
8276 		if (adev->dm.hdcp_workqueue)
8277 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8278 #endif
8279 	}
8280 }
8281 
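/*
 * i2c_algorithm master_xfer hook: translate the i2c_msg array into DC i2c
 * payloads and submit them over the link's DDC channel via dc_submit_i2c().
 */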
8282 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8283 			      struct i2c_msg *msgs, int num)
8284 {
8285 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8286 	struct ddc_service *ddc_service = i2c->ddc_service;
8287 	struct i2c_command cmd;
8288 	int i;
8289 	int result = -EIO;
8290 
8291 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8292 
8293 	if (!cmd.payloads)
8294 		return result;
8295 
8296 	cmd.number_of_payloads = num;
8297 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8298 	cmd.speed = 100;
8299 
8300 	for (i = 0; i < num; i++) {
8301 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8302 		cmd.payloads[i].address = msgs[i].addr;
8303 		cmd.payloads[i].length = msgs[i].len;
8304 		cmd.payloads[i].data = msgs[i].buf;
8305 	}
8306 
8307 	if (dc_submit_i2c(
8308 			ddc_service->ctx->dc,
8309 			ddc_service->ddc_pin->hw_info.ddc_channel,
8310 			&cmd))
8311 		result = num;
8312 
8313 	kfree(cmd.payloads);
8314 	return result;
8315 }
8316 
8317 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8318 {
8319 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8320 }
8321 
8322 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8323 	.master_xfer = amdgpu_dm_i2c_xfer,
8324 	.functionality = amdgpu_dm_i2c_func,
8325 };
8326 
8327 static struct amdgpu_i2c_adapter *
8328 create_i2c(struct ddc_service *ddc_service,
8329 	   int link_index,
8330 	   int *res)
8331 {
8332 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8333 	struct amdgpu_i2c_adapter *i2c;
8334 
8335 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8336 	if (!i2c)
8337 		return NULL;
8338 	i2c->base.owner = THIS_MODULE;
8339 	i2c->base.class = I2C_CLASS_DDC;
8340 	i2c->base.dev.parent = &adev->pdev->dev;
8341 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8342 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8343 	i2c_set_adapdata(&i2c->base, i2c);
8344 	i2c->ddc_service = ddc_service;
8345 	if (i2c->ddc_service->ddc_pin)
8346 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8347 
8348 	return i2c;
8349 }
8350 
8351 
8352 /*
8353  * Note: this function assumes that dc_link_detect() was called for the
8354  * dc_link which will be represented by this aconnector.
8355  */
8356 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8357 				    struct amdgpu_dm_connector *aconnector,
8358 				    uint32_t link_index,
8359 				    struct amdgpu_encoder *aencoder)
8360 {
8361 	int res = 0;
8362 	int connector_type;
8363 	struct dc *dc = dm->dc;
8364 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8365 	struct amdgpu_i2c_adapter *i2c;
8366 
8367 	link->priv = aconnector;
8368 
8369 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8370 
8371 	i2c = create_i2c(link->ddc, link->link_index, &res);
8372 	if (!i2c) {
8373 		DRM_ERROR("Failed to create i2c adapter data\n");
8374 		return -ENOMEM;
8375 	}
8376 
8377 	aconnector->i2c = i2c;
8378 	res = i2c_add_adapter(&i2c->base);
8379 
8380 	if (res) {
8381 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8382 		goto out_free;
8383 	}
8384 
8385 	connector_type = to_drm_connector_type(link->connector_signal);
8386 
8387 	res = drm_connector_init_with_ddc(
8388 			dm->ddev,
8389 			&aconnector->base,
8390 			&amdgpu_dm_connector_funcs,
8391 			connector_type,
8392 			&i2c->base);
8393 
8394 	if (res) {
8395 		DRM_ERROR("connector_init failed\n");
8396 		aconnector->connector_id = -1;
8397 		goto out_free;
8398 	}
8399 
8400 	drm_connector_helper_add(
8401 			&aconnector->base,
8402 			&amdgpu_dm_connector_helper_funcs);
8403 
8404 	amdgpu_dm_connector_init_helper(
8405 		dm,
8406 		aconnector,
8407 		connector_type,
8408 		link,
8409 		link_index);
8410 
8411 	drm_connector_attach_encoder(
8412 		&aconnector->base, &aencoder->base);
8413 
8414 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8415 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8416 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8417 
8418 out_free:
8419 	if (res) {
8420 		kfree(i2c);
8421 		aconnector->i2c = NULL;
8422 	}
8423 	return res;
8424 }
8425 
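/* Return a possible_crtcs bitmask with one bit set per CRTC, capped at six CRTCs. */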
8426 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8427 {
8428 	switch (adev->mode_info.num_crtc) {
8429 	case 1:
8430 		return 0x1;
8431 	case 2:
8432 		return 0x3;
8433 	case 3:
8434 		return 0x7;
8435 	case 4:
8436 		return 0xf;
8437 	case 5:
8438 		return 0x1f;
8439 	case 6:
8440 	default:
8441 		return 0x3f;
8442 	}
8443 }
8444 
8445 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8446 				  struct amdgpu_encoder *aencoder,
8447 				  uint32_t link_index)
8448 {
8449 	struct amdgpu_device *adev = drm_to_adev(dev);
8450 
8451 	int res = drm_encoder_init(dev,
8452 				   &aencoder->base,
8453 				   &amdgpu_dm_encoder_funcs,
8454 				   DRM_MODE_ENCODER_TMDS,
8455 				   NULL);
8456 
8457 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8458 
8459 	if (!res)
8460 		aencoder->encoder_id = link_index;
8461 	else
8462 		aencoder->encoder_id = -1;
8463 
8464 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8465 
8466 	return res;
8467 }
8468 
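/*
 * Enable or disable the pageflip (and secure display vline0) interrupts for
 * a CRTC and turn DRM vblank handling on or off to match.
 */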
8469 static void manage_dm_interrupts(struct amdgpu_device *adev,
8470 				 struct amdgpu_crtc *acrtc,
8471 				 bool enable)
8472 {
8473 	/*
8474 	 * We have no guarantee that the frontend index maps to the same
8475 	 * backend index - some even map to more than one.
8476 	 *
8477 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8478 	 */
8479 	int irq_type =
8480 		amdgpu_display_crtc_idx_to_irq_type(
8481 			adev,
8482 			acrtc->crtc_id);
8483 
8484 	if (enable) {
8485 		drm_crtc_vblank_on(&acrtc->base);
8486 		amdgpu_irq_get(
8487 			adev,
8488 			&adev->pageflip_irq,
8489 			irq_type);
8490 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8491 		amdgpu_irq_get(
8492 			adev,
8493 			&adev->vline0_irq,
8494 			irq_type);
8495 #endif
8496 	} else {
8497 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8498 		amdgpu_irq_put(
8499 			adev,
8500 			&adev->vline0_irq,
8501 			irq_type);
8502 #endif
8503 		amdgpu_irq_put(
8504 			adev,
8505 			&adev->pageflip_irq,
8506 			irq_type);
8507 		drm_crtc_vblank_off(&acrtc->base);
8508 	}
8509 }
8510 
8511 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8512 				      struct amdgpu_crtc *acrtc)
8513 {
8514 	int irq_type =
8515 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8516 
8517 	/**
8518 	 * This reads the current state for the IRQ and forces the setting
8519 	 * to be reapplied to hardware.
8520 	 */
8521 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8522 }
8523 
8524 static bool
8525 is_scaling_state_different(const struct dm_connector_state *dm_state,
8526 			   const struct dm_connector_state *old_dm_state)
8527 {
8528 	if (dm_state->scaling != old_dm_state->scaling)
8529 		return true;
8530 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8531 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8532 			return true;
8533 	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8534 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8535 			return true;
8536 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8537 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8538 		return true;
8539 	return false;
8540 }
8541 
8542 #ifdef CONFIG_DRM_AMD_DC_HDCP
8543 static bool is_content_protection_different(struct drm_connector_state *state,
8544 					    const struct drm_connector_state *old_state,
8545 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8546 {
8547 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8548 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8549 
8550 	/* Handle: Type0/1 change */
8551 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8552 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8553 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8554 		return true;
8555 	}
8556 
8557 	/* CP is being re-enabled, ignore this
8558 	 *
8559 	 * Handles:	ENABLED -> DESIRED
8560 	 */
8561 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8562 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8563 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8564 		return false;
8565 	}
8566 
8567 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8568 	 *
8569 	 * Handles:	UNDESIRED -> ENABLED
8570 	 */
8571 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8572 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8573 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8574 
8575 	/* Stream removed and re-enabled
8576 	 *
8577 	 * Can sometimes overlap with the HPD case,
8578 	 * thus set update_hdcp to false to avoid
8579 	 * setting HDCP multiple times.
8580 	 *
8581 	 * Handles:	DESIRED -> DESIRED (Special case)
8582 	 */
8583 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8584 		state->crtc && state->crtc->enabled &&
8585 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8586 		dm_con_state->update_hdcp = false;
8587 		return true;
8588 	}
8589 
8590 	/* Hot-plug, headless s3, dpms
8591 	 *
8592 	 * Only start HDCP if the display is connected/enabled.
8593 	 * update_hdcp flag will be set to false until the next
8594 	 * HPD comes in.
8595 	 *
8596 	 * Handles:	DESIRED -> DESIRED (Special case)
8597 	 */
8598 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8599 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8600 		dm_con_state->update_hdcp = false;
8601 		return true;
8602 	}
8603 
8604 	/*
8605 	 * Handles:	UNDESIRED -> UNDESIRED
8606 	 *		DESIRED -> DESIRED
8607 	 *		ENABLED -> ENABLED
8608 	 */
8609 	if (old_state->content_protection == state->content_protection)
8610 		return false;
8611 
8612 	/*
8613 	 * Handles:	UNDESIRED -> DESIRED
8614 	 *		DESIRED -> UNDESIRED
8615 	 *		ENABLED -> UNDESIRED
8616 	 */
8617 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8618 		return true;
8619 
8620 	/*
8621 	 * Handles:	DESIRED -> ENABLED
8622 	 */
8623 	return false;
8624 }
8625 
8626 #endif
8627 static void remove_stream(struct amdgpu_device *adev,
8628 			  struct amdgpu_crtc *acrtc,
8629 			  struct dc_stream_state *stream)
8630 {
8631 	/* this is the update mode case */
8632 
8633 	acrtc->otg_inst = -1;
8634 	acrtc->enabled = false;
8635 }
8636 
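/*
 * Compute the DC cursor position from the cursor plane state. Negative
 * on-screen coordinates are clamped to zero and folded into the hotspot
 * offsets; the cursor is left disabled if it is completely off screen.
 */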
8637 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8638 			       struct dc_cursor_position *position)
8639 {
8640 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8641 	int x, y;
8642 	int xorigin = 0, yorigin = 0;
8643 
8644 	if (!crtc || !plane->state->fb)
8645 		return 0;
8646 
8647 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8648 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8649 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8650 			  __func__,
8651 			  plane->state->crtc_w,
8652 			  plane->state->crtc_h);
8653 		return -EINVAL;
8654 	}
8655 
8656 	x = plane->state->crtc_x;
8657 	y = plane->state->crtc_y;
8658 
8659 	if (x <= -amdgpu_crtc->max_cursor_width ||
8660 	    y <= -amdgpu_crtc->max_cursor_height)
8661 		return 0;
8662 
8663 	if (x < 0) {
8664 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8665 		x = 0;
8666 	}
8667 	if (y < 0) {
8668 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8669 		y = 0;
8670 	}
8671 	position->enable = true;
8672 	position->translate_by_source = true;
8673 	position->x = x;
8674 	position->y = y;
8675 	position->x_hotspot = xorigin;
8676 	position->y_hotspot = yorigin;
8677 
8678 	return 0;
8679 }
8680 
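/*
 * Program the DC cursor attributes and position for a cursor plane update,
 * or disable the cursor when the plane has no visible position.
 */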
8681 static void handle_cursor_update(struct drm_plane *plane,
8682 				 struct drm_plane_state *old_plane_state)
8683 {
8684 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8685 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8686 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8687 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8688 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8689 	uint64_t address = afb ? afb->address : 0;
8690 	struct dc_cursor_position position = {0};
8691 	struct dc_cursor_attributes attributes;
8692 	int ret;
8693 
8694 	if (!plane->state->fb && !old_plane_state->fb)
8695 		return;
8696 
8697 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8698 		      __func__,
8699 		      amdgpu_crtc->crtc_id,
8700 		      plane->state->crtc_w,
8701 		      plane->state->crtc_h);
8702 
8703 	ret = get_cursor_position(plane, crtc, &position);
8704 	if (ret)
8705 		return;
8706 
8707 	if (!position.enable) {
8708 		/* turn off cursor */
8709 		if (crtc_state && crtc_state->stream) {
8710 			mutex_lock(&adev->dm.dc_lock);
8711 			dc_stream_set_cursor_position(crtc_state->stream,
8712 						      &position);
8713 			mutex_unlock(&adev->dm.dc_lock);
8714 		}
8715 		return;
8716 	}
8717 
8718 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8719 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8720 
8721 	memset(&attributes, 0, sizeof(attributes));
8722 	attributes.address.high_part = upper_32_bits(address);
8723 	attributes.address.low_part  = lower_32_bits(address);
8724 	attributes.width             = plane->state->crtc_w;
8725 	attributes.height            = plane->state->crtc_h;
8726 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8727 	attributes.rotation_angle    = 0;
8728 	attributes.attribute_flags.value = 0;
8729 
8730 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8731 
8732 	if (crtc_state->stream) {
8733 		mutex_lock(&adev->dm.dc_lock);
8734 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8735 							 &attributes))
8736 			DRM_ERROR("DC failed to set cursor attributes\n");
8737 
8738 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8739 						   &position))
8740 			DRM_ERROR("DC failed to set cursor position\n");
8741 		mutex_unlock(&adev->dm.dc_lock);
8742 	}
8743 }
8744 
8745 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8746 {
8747 
8748 	assert_spin_locked(&acrtc->base.dev->event_lock);
8749 	WARN_ON(acrtc->event);
8750 
8751 	acrtc->event = acrtc->base.state->event;
8752 
8753 	/* Set the flip status */
8754 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8755 
8756 	/* Mark this event as consumed */
8757 	acrtc->base.state->event = NULL;
8758 
8759 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8760 		     acrtc->crtc_id);
8761 }
8762 
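/*
 * Recompute the VRR parameters and VRR infopacket for a page flip and store
 * them on the stream and in the CRTC's IRQ parameters, under the event lock.
 */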
8763 static void update_freesync_state_on_stream(
8764 	struct amdgpu_display_manager *dm,
8765 	struct dm_crtc_state *new_crtc_state,
8766 	struct dc_stream_state *new_stream,
8767 	struct dc_plane_state *surface,
8768 	u32 flip_timestamp_in_us)
8769 {
8770 	struct mod_vrr_params vrr_params;
8771 	struct dc_info_packet vrr_infopacket = {0};
8772 	struct amdgpu_device *adev = dm->adev;
8773 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8774 	unsigned long flags;
8775 	bool pack_sdp_v1_3 = false;
8776 
8777 	if (!new_stream)
8778 		return;
8779 
8780 	/*
8781 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8782 	 * For now it's sufficient to just guard against these conditions.
8783 	 */
8784 
8785 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8786 		return;
8787 
8788 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8789 	vrr_params = acrtc->dm_irq_params.vrr_params;
8790 
8791 	if (surface) {
8792 		mod_freesync_handle_preflip(
8793 			dm->freesync_module,
8794 			surface,
8795 			new_stream,
8796 			flip_timestamp_in_us,
8797 			&vrr_params);
8798 
8799 		if (adev->family < AMDGPU_FAMILY_AI &&
8800 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8801 			mod_freesync_handle_v_update(dm->freesync_module,
8802 						     new_stream, &vrr_params);
8803 
8804 			/* Need to call this before the frame ends. */
8805 			dc_stream_adjust_vmin_vmax(dm->dc,
8806 						   new_crtc_state->stream,
8807 						   &vrr_params.adjust);
8808 		}
8809 	}
8810 
8811 	mod_freesync_build_vrr_infopacket(
8812 		dm->freesync_module,
8813 		new_stream,
8814 		&vrr_params,
8815 		PACKET_TYPE_VRR,
8816 		TRANSFER_FUNC_UNKNOWN,
8817 		&vrr_infopacket,
8818 		pack_sdp_v1_3);
8819 
8820 	new_crtc_state->freesync_timing_changed |=
8821 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8822 			&vrr_params.adjust,
8823 			sizeof(vrr_params.adjust)) != 0);
8824 
8825 	new_crtc_state->freesync_vrr_info_changed |=
8826 		(memcmp(&new_crtc_state->vrr_infopacket,
8827 			&vrr_infopacket,
8828 			sizeof(vrr_infopacket)) != 0);
8829 
8830 	acrtc->dm_irq_params.vrr_params = vrr_params;
8831 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8832 
8833 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8834 	new_stream->vrr_infopacket = vrr_infopacket;
8835 
8836 	if (new_crtc_state->freesync_vrr_info_changed)
8837 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8838 			      new_crtc_state->base.crtc->base.id,
8839 			      (int)new_crtc_state->base.vrr_enabled,
8840 			      (int)vrr_params.state);
8841 
8842 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8843 }
8844 
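/*
 * Derive the VRR state from the CRTC's freesync config and copy the config,
 * active plane count and VRR parameters into dm_irq_params so the IRQ
 * handlers see a consistent snapshot.
 */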
8845 static void update_stream_irq_parameters(
8846 	struct amdgpu_display_manager *dm,
8847 	struct dm_crtc_state *new_crtc_state)
8848 {
8849 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8850 	struct mod_vrr_params vrr_params;
8851 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8852 	struct amdgpu_device *adev = dm->adev;
8853 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8854 	unsigned long flags;
8855 
8856 	if (!new_stream)
8857 		return;
8858 
8859 	/*
8860 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8861 	 * For now it's sufficient to just guard against these conditions.
8862 	 */
8863 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8864 		return;
8865 
8866 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8867 	vrr_params = acrtc->dm_irq_params.vrr_params;
8868 
8869 	if (new_crtc_state->vrr_supported &&
8870 	    config.min_refresh_in_uhz &&
8871 	    config.max_refresh_in_uhz) {
8872 		/*
8873 		 * If a freesync compatible mode was set, config.state will have
8874 		 * been set in atomic check.
8875 		 */
8876 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8877 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8878 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8879 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8880 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8881 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8882 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8883 		} else {
8884 			config.state = new_crtc_state->base.vrr_enabled ?
8885 						     VRR_STATE_ACTIVE_VARIABLE :
8886 						     VRR_STATE_INACTIVE;
8887 		}
8888 	} else {
8889 		config.state = VRR_STATE_UNSUPPORTED;
8890 	}
8891 
8892 	mod_freesync_build_vrr_params(dm->freesync_module,
8893 				      new_stream,
8894 				      &config, &vrr_params);
8895 
8896 	new_crtc_state->freesync_timing_changed |=
8897 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8898 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8899 
8900 	new_crtc_state->freesync_config = config;
8901 	/* Copy state for access from DM IRQ handler */
8902 	acrtc->dm_irq_params.freesync_config = config;
8903 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8904 	acrtc->dm_irq_params.vrr_params = vrr_params;
8905 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8906 }
8907 
8908 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8909 					    struct dm_crtc_state *new_state)
8910 {
8911 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8912 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8913 
8914 	if (!old_vrr_active && new_vrr_active) {
8915 		/* Transition VRR inactive -> active:
8916 		 * While VRR is active, we must not disable vblank irq, as a
8917 		 * re-enable after a disable would compute bogus vblank/pflip
8918 		 * timestamps if it happened inside the display front-porch.
8919 		 *
8920 		 * We also need the vupdate irq for the actual core vblank handling
8921 		 * at the end of vblank.
8922 		 */
8923 		dm_set_vupdate_irq(new_state->base.crtc, true);
8924 		drm_crtc_vblank_get(new_state->base.crtc);
8925 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8926 				 __func__, new_state->base.crtc->base.id);
8927 	} else if (old_vrr_active && !new_vrr_active) {
8928 		/* Transition VRR active -> inactive:
8929 		 * Allow vblank irq disable again for fixed refresh rate.
8930 		 */
8931 		dm_set_vupdate_irq(new_state->base.crtc, false);
8932 		drm_crtc_vblank_put(new_state->base.crtc);
8933 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8934 				 __func__, new_state->base.crtc->base.id);
8935 	}
8936 }
8937 
8938 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8939 {
8940 	struct drm_plane *plane;
8941 	struct drm_plane_state *old_plane_state;
8942 	int i;
8943 
8944 	/*
8945 	 * TODO: Make this per-stream so we don't issue redundant updates for
8946 	 * commits with multiple streams.
8947 	 */
8948 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8949 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8950 			handle_cursor_update(plane, old_plane_state);
8951 }
8952 
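/*
 * Build a surface update bundle for all planes on the given CRTC, wait for
 * pending fences and the target vblank, and commit the updates to DC along
 * with any stream-level changes (color management, ABM, VRR, PSR).
 */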
8953 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8954 				    struct dc_state *dc_state,
8955 				    struct drm_device *dev,
8956 				    struct amdgpu_display_manager *dm,
8957 				    struct drm_crtc *pcrtc,
8958 				    bool wait_for_vblank)
8959 {
8960 	uint32_t i;
8961 	uint64_t timestamp_ns;
8962 	struct drm_plane *plane;
8963 	struct drm_plane_state *old_plane_state, *new_plane_state;
8964 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8965 	struct drm_crtc_state *new_pcrtc_state =
8966 			drm_atomic_get_new_crtc_state(state, pcrtc);
8967 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8968 	struct dm_crtc_state *dm_old_crtc_state =
8969 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8970 	int planes_count = 0, vpos, hpos;
8971 	long r;
8972 	unsigned long flags;
8973 	struct amdgpu_bo *abo;
8974 	uint32_t target_vblank, last_flip_vblank;
8975 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8976 	bool pflip_present = false;
8977 	struct {
8978 		struct dc_surface_update surface_updates[MAX_SURFACES];
8979 		struct dc_plane_info plane_infos[MAX_SURFACES];
8980 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8981 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8982 		struct dc_stream_update stream_update;
8983 	} *bundle;
8984 
8985 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8986 
8987 	if (!bundle) {
8988 		dm_error("Failed to allocate update bundle\n");
8989 		goto cleanup;
8990 	}
8991 
8992 	/*
8993 	 * Disable the cursor first if we're disabling all the planes.
8994 	 * It'll remain on the screen after the planes are re-enabled
8995 	 * if we don't.
8996 	 */
8997 	if (acrtc_state->active_planes == 0)
8998 		amdgpu_dm_commit_cursors(state);
8999 
9000 	/* update planes when needed */
9001 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9002 		struct drm_crtc *crtc = new_plane_state->crtc;
9003 		struct drm_crtc_state *new_crtc_state;
9004 		struct drm_framebuffer *fb = new_plane_state->fb;
9005 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9006 		bool plane_needs_flip;
9007 		struct dc_plane_state *dc_plane;
9008 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9009 
9010 		/* Cursor plane is handled after stream updates */
9011 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9012 			continue;
9013 
9014 		if (!fb || !crtc || pcrtc != crtc)
9015 			continue;
9016 
9017 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9018 		if (!new_crtc_state->active)
9019 			continue;
9020 
9021 		dc_plane = dm_new_plane_state->dc_state;
9022 
9023 		bundle->surface_updates[planes_count].surface = dc_plane;
9024 		if (new_pcrtc_state->color_mgmt_changed) {
9025 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9026 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9027 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9028 		}
9029 
9030 		fill_dc_scaling_info(dm->adev, new_plane_state,
9031 				     &bundle->scaling_infos[planes_count]);
9032 
9033 		bundle->surface_updates[planes_count].scaling_info =
9034 			&bundle->scaling_infos[planes_count];
9035 
9036 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9037 
9038 		pflip_present = pflip_present || plane_needs_flip;
9039 
9040 		if (!plane_needs_flip) {
9041 			planes_count += 1;
9042 			continue;
9043 		}
9044 
9045 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9046 
9047 		/*
9048 		 * Wait for all fences on this FB. Do limited wait to avoid
9049 		 * deadlock during GPU reset when this fence will not signal
9050 		 * but we hold reservation lock for the BO.
9051 		 */
9052 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9053 					  msecs_to_jiffies(5000));
9054 		if (unlikely(r <= 0))
9055 			DRM_ERROR("Waiting for fences timed out!");
9056 
9057 		fill_dc_plane_info_and_addr(
9058 			dm->adev, new_plane_state,
9059 			afb->tiling_flags,
9060 			&bundle->plane_infos[planes_count],
9061 			&bundle->flip_addrs[planes_count].address,
9062 			afb->tmz_surface, false);
9063 
9064 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9065 				 new_plane_state->plane->index,
9066 				 bundle->plane_infos[planes_count].dcc.enable);
9067 
9068 		bundle->surface_updates[planes_count].plane_info =
9069 			&bundle->plane_infos[planes_count];
9070 
9071 		/*
9072 		 * Only allow immediate flips for fast updates that don't
9073 		 * change FB pitch, DCC state, rotation or mirroring.
9074 		 */
9075 		bundle->flip_addrs[planes_count].flip_immediate =
9076 			crtc->state->async_flip &&
9077 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9078 
9079 		timestamp_ns = ktime_get_ns();
9080 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9081 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9082 		bundle->surface_updates[planes_count].surface = dc_plane;
9083 
9084 		if (!bundle->surface_updates[planes_count].surface) {
9085 			DRM_ERROR("No surface for CRTC: id=%d\n",
9086 					acrtc_attach->crtc_id);
9087 			continue;
9088 		}
9089 
9090 		if (plane == pcrtc->primary)
9091 			update_freesync_state_on_stream(
9092 				dm,
9093 				acrtc_state,
9094 				acrtc_state->stream,
9095 				dc_plane,
9096 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9097 
9098 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9099 				 __func__,
9100 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9101 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9102 
9103 		planes_count += 1;
9104 
9105 	}
9106 
9107 	if (pflip_present) {
9108 		if (!vrr_active) {
9109 			/* Use old throttling in non-vrr fixed refresh rate mode
9110 			 * to keep flip scheduling based on target vblank counts
9111 			 * working in a backwards compatible way, e.g., for
9112 			 * clients using the GLX_OML_sync_control extension or
9113 			 * DRI3/Present extension with defined target_msc.
9114 			 */
9115 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9116 		}
9117 		else {
9118 			/* For variable refresh rate mode only:
9119 			 * Get vblank of last completed flip to avoid > 1 vrr
9120 			 * flips per video frame by use of throttling, but allow
9121 			 * flip programming anywhere in the possibly large
9122 			 * variable vrr vblank interval for fine-grained flip
9123 			 * timing control and more opportunity to avoid stutter
9124 			 * on late submission of flips.
9125 			 */
9126 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9127 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9128 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9129 		}
9130 
9131 		target_vblank = last_flip_vblank + wait_for_vblank;
9132 
9133 		/*
9134 		 * Wait until we're out of the vertical blank period before the one
9135 		 * targeted by the flip
9136 		 */
9137 		while ((acrtc_attach->enabled &&
9138 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9139 							    0, &vpos, &hpos, NULL,
9140 							    NULL, &pcrtc->hwmode)
9141 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9142 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9143 			(int)(target_vblank -
9144 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9145 			usleep_range(1000, 1100);
9146 		}
9147 
9148 		/**
9149 		 * Prepare the flip event for the pageflip interrupt to handle.
9150 		 *
9151 		 * This only works in the case where we've already turned on the
9152 		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
9153 		 * from 0 -> n planes we have to skip a hardware generated event
9154 		 * and rely on sending it from software.
9155 		 */
9156 		if (acrtc_attach->base.state->event &&
9157 		    acrtc_state->active_planes > 0 &&
9158 		    !acrtc_state->force_dpms_off) {
9159 			drm_crtc_vblank_get(pcrtc);
9160 
9161 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9162 
9163 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9164 			prepare_flip_isr(acrtc_attach);
9165 
9166 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9167 		}
9168 
9169 		if (acrtc_state->stream) {
9170 			if (acrtc_state->freesync_vrr_info_changed)
9171 				bundle->stream_update.vrr_infopacket =
9172 					&acrtc_state->stream->vrr_infopacket;
9173 		}
9174 	}
9175 
9176 	/* Update the planes if changed or disable if we don't have any. */
9177 	if ((planes_count || acrtc_state->active_planes == 0) &&
9178 		acrtc_state->stream) {
9179 #if defined(CONFIG_DRM_AMD_DC_DCN)
9180 		/*
9181 		 * If PSR or idle optimizations are enabled then flush out
9182 		 * any pending work before hardware programming.
9183 		 */
9184 		if (dm->vblank_control_workqueue)
9185 			flush_workqueue(dm->vblank_control_workqueue);
9186 #endif
9187 
9188 		bundle->stream_update.stream = acrtc_state->stream;
9189 		if (new_pcrtc_state->mode_changed) {
9190 			bundle->stream_update.src = acrtc_state->stream->src;
9191 			bundle->stream_update.dst = acrtc_state->stream->dst;
9192 		}
9193 
9194 		if (new_pcrtc_state->color_mgmt_changed) {
9195 			/*
9196 			 * TODO: This isn't fully correct since we've actually
9197 			 * already modified the stream in place.
9198 			 */
9199 			bundle->stream_update.gamut_remap =
9200 				&acrtc_state->stream->gamut_remap_matrix;
9201 			bundle->stream_update.output_csc_transform =
9202 				&acrtc_state->stream->csc_color_matrix;
9203 			bundle->stream_update.out_transfer_func =
9204 				acrtc_state->stream->out_transfer_func;
9205 		}
9206 
9207 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9208 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9209 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9210 
9211 		/*
9212 		 * If FreeSync state on the stream has changed then we need to
9213 		 * re-adjust the min/max bounds now that DC doesn't handle this
9214 		 * as part of commit.
9215 		 */
9216 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9217 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9218 			dc_stream_adjust_vmin_vmax(
9219 				dm->dc, acrtc_state->stream,
9220 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9221 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9222 		}
9223 		mutex_lock(&dm->dc_lock);
9224 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9225 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9226 			amdgpu_dm_psr_disable(acrtc_state->stream);
9227 
9228 		dc_commit_updates_for_stream(dm->dc,
9229 						     bundle->surface_updates,
9230 						     planes_count,
9231 						     acrtc_state->stream,
9232 						     &bundle->stream_update,
9233 						     dc_state);
9234 
9235 		/**
9236 		 * Enable or disable the interrupts on the backend.
9237 		 *
9238 		 * Most pipes are put into power gating when unused.
9239 		 *
9240 		 * When power gating is enabled on a pipe we lose the
9241 		 * interrupt enablement state when power gating is disabled.
9242 		 *
9243 		 * So we need to update the IRQ control state in hardware
9244 		 * whenever the pipe turns on (since it could be previously
9245 		 * power gated) or off (since some pipes can't be power gated
9246 		 * on some ASICs).
9247 		 */
9248 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9249 			dm_update_pflip_irq_state(drm_to_adev(dev),
9250 						  acrtc_attach);
9251 
9252 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9253 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9254 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9255 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9256 
9257 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9258 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9259 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9260 			struct amdgpu_dm_connector *aconn =
9261 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9262 
9263 			if (aconn->psr_skip_count > 0)
9264 				aconn->psr_skip_count--;
9265 
9266 			/* Allow PSR when skip count is 0. */
9267 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9268 		} else {
9269 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9270 		}
9271 
9272 		mutex_unlock(&dm->dc_lock);
9273 	}
9274 
9275 	/*
9276 	 * Update cursor state *after* programming all the planes.
9277 	 * This avoids redundant programming in the case where we're going
9278 	 * to be disabling a single plane - those pipes are being disabled.
9279 	 */
9280 	if (acrtc_state->active_planes)
9281 		amdgpu_dm_commit_cursors(state);
9282 
9283 cleanup:
9284 	kfree(bundle);
9285 }
9286 
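/*
 * Notify the audio component about connectors that lost or gained a stream
 * in this commit, so ELD/audio routing stays in sync with the display state.
 */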
9287 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9288 				   struct drm_atomic_state *state)
9289 {
9290 	struct amdgpu_device *adev = drm_to_adev(dev);
9291 	struct amdgpu_dm_connector *aconnector;
9292 	struct drm_connector *connector;
9293 	struct drm_connector_state *old_con_state, *new_con_state;
9294 	struct drm_crtc_state *new_crtc_state;
9295 	struct dm_crtc_state *new_dm_crtc_state;
9296 	const struct dc_stream_status *status;
9297 	int i, inst;
9298 
9299 	/* Notify device removals. */
9300 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9301 		if (old_con_state->crtc != new_con_state->crtc) {
9302 			/* CRTC changes require notification. */
9303 			goto notify;
9304 		}
9305 
9306 		if (!new_con_state->crtc)
9307 			continue;
9308 
9309 		new_crtc_state = drm_atomic_get_new_crtc_state(
9310 			state, new_con_state->crtc);
9311 
9312 		if (!new_crtc_state)
9313 			continue;
9314 
9315 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9316 			continue;
9317 
9318 	notify:
9319 		aconnector = to_amdgpu_dm_connector(connector);
9320 
9321 		mutex_lock(&adev->dm.audio_lock);
9322 		inst = aconnector->audio_inst;
9323 		aconnector->audio_inst = -1;
9324 		mutex_unlock(&adev->dm.audio_lock);
9325 
9326 		amdgpu_dm_audio_eld_notify(adev, inst);
9327 	}
9328 
9329 	/* Notify audio device additions. */
9330 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9331 		if (!new_con_state->crtc)
9332 			continue;
9333 
9334 		new_crtc_state = drm_atomic_get_new_crtc_state(
9335 			state, new_con_state->crtc);
9336 
9337 		if (!new_crtc_state)
9338 			continue;
9339 
9340 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9341 			continue;
9342 
9343 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9344 		if (!new_dm_crtc_state->stream)
9345 			continue;
9346 
9347 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9348 		if (!status)
9349 			continue;
9350 
9351 		aconnector = to_amdgpu_dm_connector(connector);
9352 
9353 		mutex_lock(&adev->dm.audio_lock);
9354 		inst = status->audio_inst;
9355 		aconnector->audio_inst = inst;
9356 		mutex_unlock(&adev->dm.audio_lock);
9357 
9358 		amdgpu_dm_audio_eld_notify(adev, inst);
9359 	}
9360 }
9361 
9362 /*
9363  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9364  * @crtc_state: the DRM CRTC state
9365  * @stream_state: the DC stream state.
9366  *
9367  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9368  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9369  */
9370 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9371 						struct dc_stream_state *stream_state)
9372 {
9373 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9374 }
9375 
9376 /**
9377  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9378  * @state: The atomic state to commit
9379  *
9380  * This will tell DC to commit the constructed DC state from atomic_check,
9381  * programming the hardware. Any failure here implies a hardware failure, since
9382  * atomic check should have filtered anything non-kosher.
9383  */
9384 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9385 {
9386 	struct drm_device *dev = state->dev;
9387 	struct amdgpu_device *adev = drm_to_adev(dev);
9388 	struct amdgpu_display_manager *dm = &adev->dm;
9389 	struct dm_atomic_state *dm_state;
9390 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9391 	uint32_t i, j;
9392 	struct drm_crtc *crtc;
9393 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9394 	unsigned long flags;
9395 	bool wait_for_vblank = true;
9396 	struct drm_connector *connector;
9397 	struct drm_connector_state *old_con_state, *new_con_state;
9398 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9399 	int crtc_disable_count = 0;
9400 	bool mode_set_reset_required = false;
9401 
9402 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9403 
9404 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9405 
9406 	dm_state = dm_atomic_get_new_state(state);
9407 	if (dm_state && dm_state->context) {
9408 		dc_state = dm_state->context;
9409 	} else {
9410 		/* No state changes, retain current state. */
9411 		dc_state_temp = dc_create_state(dm->dc);
9412 		ASSERT(dc_state_temp);
9413 		dc_state = dc_state_temp;
9414 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9415 	}
9416 
9417 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9418 				       new_crtc_state, i) {
9419 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9420 
9421 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9422 
9423 		if (old_crtc_state->active &&
9424 		    (!new_crtc_state->active ||
9425 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9426 			manage_dm_interrupts(adev, acrtc, false);
9427 			dc_stream_release(dm_old_crtc_state->stream);
9428 		}
9429 	}
9430 
9431 	drm_atomic_helper_calc_timestamping_constants(state);
9432 
9433 	/* update changed items */
9434 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9435 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9436 
9437 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9438 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9439 
9440 		DRM_DEBUG_ATOMIC(
9441 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9442 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9443 			"connectors_changed:%d\n",
9444 			acrtc->crtc_id,
9445 			new_crtc_state->enable,
9446 			new_crtc_state->active,
9447 			new_crtc_state->planes_changed,
9448 			new_crtc_state->mode_changed,
9449 			new_crtc_state->active_changed,
9450 			new_crtc_state->connectors_changed);
9451 
9452 		/* Disable cursor if disabling crtc */
9453 		if (old_crtc_state->active && !new_crtc_state->active) {
9454 			struct dc_cursor_position position;
9455 
9456 			memset(&position, 0, sizeof(position));
9457 			mutex_lock(&dm->dc_lock);
9458 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9459 			mutex_unlock(&dm->dc_lock);
9460 		}
9461 
9462 		/* Copy all transient state flags into dc state */
9463 		if (dm_new_crtc_state->stream) {
9464 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9465 							    dm_new_crtc_state->stream);
9466 		}
9467 
9468 		/* handles headless hotplug case, updating new_state and
9469 		 * aconnector as needed
9470 		 */
9471 
9472 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9473 
9474 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9475 
9476 			if (!dm_new_crtc_state->stream) {
9477 				/*
9478 				 * This could happen because of issues with
9479 				 * userspace notification delivery.
9480 				 * In this case userspace tries to set a mode on
9481 				 * a display which is in fact disconnected.
9482 				 * dc_sink is NULL on the aconnector in this case.
9483 				 * We expect a mode reset to come soon.
9484 				 *
9485 				 * This can also happen when an unplug is done
9486 				 * during the resume sequence.
9487 				 *
9488 				 * In this case, we want to pretend we still
9489 				 * have a sink to keep the pipe running so that
9490 				 * hw state is consistent with the sw state.
9491 				 */
9492 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9493 						__func__, acrtc->base.base.id);
9494 				continue;
9495 			}
9496 
9497 			if (dm_old_crtc_state->stream)
9498 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9499 
9500 			pm_runtime_get_noresume(dev->dev);
9501 
9502 			acrtc->enabled = true;
9503 			acrtc->hw_mode = new_crtc_state->mode;
9504 			crtc->hwmode = new_crtc_state->mode;
9505 			mode_set_reset_required = true;
9506 		} else if (modereset_required(new_crtc_state)) {
9507 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9508 			/* i.e. reset mode */
9509 			if (dm_old_crtc_state->stream)
9510 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9511 
9512 			mode_set_reset_required = true;
9513 		}
9514 	} /* for_each_crtc_in_state() */
9515 
9516 	if (dc_state) {
9517 		/* If there is a mode set or reset, disable eDP PSR. */
9518 		if (mode_set_reset_required) {
9519 #if defined(CONFIG_DRM_AMD_DC_DCN)
9520 			if (dm->vblank_control_workqueue)
9521 				flush_workqueue(dm->vblank_control_workqueue);
9522 #endif
9523 			amdgpu_dm_psr_disable_all(dm);
9524 		}
9525 
9526 		dm_enable_per_frame_crtc_master_sync(dc_state);
9527 		mutex_lock(&dm->dc_lock);
9528 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9529 #if defined(CONFIG_DRM_AMD_DC_DCN)
9530 		/* Allow idle optimization when vblank count is 0 for display off */
9531 		if (dm->active_vblank_irq_count == 0)
9532 			dc_allow_idle_optimizations(dm->dc, true);
9533 #endif
9534 		mutex_unlock(&dm->dc_lock);
9535 	}
9536 
9537 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9538 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9539 
9540 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9541 
9542 		if (dm_new_crtc_state->stream != NULL) {
9543 			const struct dc_stream_status *status =
9544 					dc_stream_get_status(dm_new_crtc_state->stream);
9545 
9546 			if (!status)
9547 				status = dc_stream_get_status_from_state(dc_state,
9548 									 dm_new_crtc_state->stream);
9549 			if (!status)
9550 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9551 			else
9552 				acrtc->otg_inst = status->primary_otg_inst;
9553 		}
9554 	}
9555 #ifdef CONFIG_DRM_AMD_DC_HDCP
9556 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9557 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9558 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9559 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9560 
9561 		new_crtc_state = NULL;
9562 
9563 		if (acrtc)
9564 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9565 
9566 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9567 
9568 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9569 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9570 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9571 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9572 			dm_new_con_state->update_hdcp = true;
9573 			continue;
9574 		}
9575 
9576 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9577 			hdcp_update_display(
9578 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9579 				new_con_state->hdcp_content_type,
9580 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9581 	}
9582 #endif
9583 
9584 	/* Handle connector state changes */
9585 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9586 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9587 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9588 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9589 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9590 		struct dc_stream_update stream_update;
9591 		struct dc_info_packet hdr_packet;
9592 		struct dc_stream_status *status = NULL;
9593 		bool abm_changed, hdr_changed, scaling_changed;
9594 
9595 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9596 		memset(&stream_update, 0, sizeof(stream_update));
9597 
9598 		if (acrtc) {
9599 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9600 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9601 		}
9602 
9603 		/* Skip any modesets/resets */
9604 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9605 			continue;
9606 
9607 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9608 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9609 
9610 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9611 							     dm_old_con_state);
9612 
9613 		abm_changed = dm_new_crtc_state->abm_level !=
9614 			      dm_old_crtc_state->abm_level;
9615 
9616 		hdr_changed =
9617 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9618 
9619 		if (!scaling_changed && !abm_changed && !hdr_changed)
9620 			continue;
9621 
9622 		stream_update.stream = dm_new_crtc_state->stream;
9623 		if (scaling_changed) {
9624 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9625 					dm_new_con_state, dm_new_crtc_state->stream);
9626 
9627 			stream_update.src = dm_new_crtc_state->stream->src;
9628 			stream_update.dst = dm_new_crtc_state->stream->dst;
9629 		}
9630 
9631 		if (abm_changed) {
9632 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9633 
9634 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9635 		}
9636 
9637 		if (hdr_changed) {
9638 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9639 			stream_update.hdr_static_metadata = &hdr_packet;
9640 		}
9641 
9642 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9643 
9644 		if (WARN_ON(!status))
9645 			continue;
9646 
9647 		WARN_ON(!status->plane_count);
9648 
9649 		/*
9650 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9651 		 * Here we create an empty update on each plane.
9652 		 * To fix this, DC should permit updating only stream properties.
9653 		 */
9654 		for (j = 0; j < status->plane_count; j++)
9655 			dummy_updates[j].surface = status->plane_states[0];
9656 
9657 
9658 		mutex_lock(&dm->dc_lock);
9659 		dc_commit_updates_for_stream(dm->dc,
9660 						     dummy_updates,
9661 						     status->plane_count,
9662 						     dm_new_crtc_state->stream,
9663 						     &stream_update,
9664 						     dc_state);
9665 		mutex_unlock(&dm->dc_lock);
9666 	}
9667 
9668 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9669 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9670 				      new_crtc_state, i) {
9671 		if (old_crtc_state->active && !new_crtc_state->active)
9672 			crtc_disable_count++;
9673 
9674 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9675 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9676 
9677 		/* For freesync config update on crtc state and params for irq */
9678 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9679 
9680 		/* Handle vrr on->off / off->on transitions */
9681 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9682 						dm_new_crtc_state);
9683 	}
9684 
9685 	/**
9686 	 * Enable interrupts for CRTCs that are newly enabled or went through
9687 	 * a modeset. This is intentionally deferred until after the front end
9688 	 * state has been modified so that the OTG is on before the IRQ
9689 	 * handlers run and don't access stale or invalid state.
9690 	 */
9691 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9692 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9693 #ifdef CONFIG_DEBUG_FS
9694 		bool configure_crc = false;
9695 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9696 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9697 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9698 #endif
9699 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9700 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9701 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9702 #endif
9703 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9704 
9705 		if (new_crtc_state->active &&
9706 		    (!old_crtc_state->active ||
9707 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9708 			dc_stream_retain(dm_new_crtc_state->stream);
9709 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9710 			manage_dm_interrupts(adev, acrtc, true);
9711 
9712 #ifdef CONFIG_DEBUG_FS
9713 			/**
9714 			 * Frontend may have changed so reapply the CRC capture
9715 			 * settings for the stream.
9716 			 */
9717 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9718 
9719 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9720 				configure_crc = true;
9721 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9722 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9723 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9724 					acrtc->dm_irq_params.crc_window.update_win = true;
9725 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9726 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9727 					crc_rd_wrk->crtc = crtc;
9728 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9729 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9730 				}
9731 #endif
9732 			}
9733 
9734 			if (configure_crc)
9735 				if (amdgpu_dm_crtc_configure_crc_source(
9736 					crtc, dm_new_crtc_state, cur_crc_src))
9737 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9738 #endif
9739 		}
9740 	}
9741 
9742 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9743 		if (new_crtc_state->async_flip)
9744 			wait_for_vblank = false;
9745 
9746 	/* update planes when needed per crtc*/
9747 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9748 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9749 
9750 		if (dm_new_crtc_state->stream)
9751 			amdgpu_dm_commit_planes(state, dc_state, dev,
9752 						dm, crtc, wait_for_vblank);
9753 	}
9754 
9755 	/* Update audio instances for each connector. */
9756 	amdgpu_dm_commit_audio(dev, state);
9757 
9758 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9759 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9760 	/* restore the backlight level */
9761 	for (i = 0; i < dm->num_of_edps; i++) {
9762 		if (dm->backlight_dev[i] &&
9763 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9764 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9765 	}
9766 #endif
9767 	/*
9768 	 * Send a vblank event for all events not handled in the flip path and
9769 	 * mark the events as consumed for drm_atomic_helper_commit_hw_done().
9770 	 */
9771 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9772 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9773 
9774 		if (new_crtc_state->event)
9775 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9776 
9777 		new_crtc_state->event = NULL;
9778 	}
9779 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9780 
9781 	/* Signal HW programming completion */
9782 	drm_atomic_helper_commit_hw_done(state);
9783 
9784 	if (wait_for_vblank)
9785 		drm_atomic_helper_wait_for_flip_done(dev, state);
9786 
9787 	drm_atomic_helper_cleanup_planes(dev, state);
9788 
9789 	/* return the stolen vga memory back to VRAM */
9790 	if (!adev->mman.keep_stolen_vga_memory)
9791 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9792 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9793 
9794 	/*
9795 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9796 	 * so we can put the GPU into runtime suspend if we're not driving any
9797 	 * displays anymore
9798 	 */
9799 	for (i = 0; i < crtc_disable_count; i++)
9800 		pm_runtime_put_autosuspend(dev->dev);
9801 	pm_runtime_mark_last_busy(dev->dev);
9802 
9803 	if (dc_state_temp)
9804 		dc_release_state(dc_state_temp);
9805 }
9806 
9807 
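/*
 * Build and commit a minimal atomic state that forces a modeset on the
 * connector's current CRTC, restoring the previous display configuration.
 */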
9808 static int dm_force_atomic_commit(struct drm_connector *connector)
9809 {
9810 	int ret = 0;
9811 	struct drm_device *ddev = connector->dev;
9812 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9813 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9814 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9815 	struct drm_connector_state *conn_state;
9816 	struct drm_crtc_state *crtc_state;
9817 	struct drm_plane_state *plane_state;
9818 
9819 	if (!state)
9820 		return -ENOMEM;
9821 
9822 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9823 
9824 	/* Construct an atomic state to restore previous display setting */
9825 
9826 	/*
9827 	 * Attach connectors to drm_atomic_state
9828 	 */
9829 	conn_state = drm_atomic_get_connector_state(state, connector);
9830 
9831 	ret = PTR_ERR_OR_ZERO(conn_state);
9832 	if (ret)
9833 		goto out;
9834 
9835 	/* Attach crtc to drm_atomic_state*/
9836 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9837 
9838 	ret = PTR_ERR_OR_ZERO(crtc_state);
9839 	if (ret)
9840 		goto out;
9841 
9842 	/* force a restore */
9843 	crtc_state->mode_changed = true;
9844 
9845 	/* Attach plane to drm_atomic_state */
9846 	plane_state = drm_atomic_get_plane_state(state, plane);
9847 
9848 	ret = PTR_ERR_OR_ZERO(plane_state);
9849 	if (ret)
9850 		goto out;
9851 
9852 	/* Call commit internally with the state we just constructed */
9853 	ret = drm_atomic_commit(state);
9854 
9855 out:
9856 	drm_atomic_state_put(state);
9857 	if (ret)
9858 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9859 
9860 	return ret;
9861 }
9862 
9863 /*
9864  * This function handles all cases when a set mode does not come upon hotplug.
9865  * This includes when a display is unplugged then plugged back into the
9866  * same port and when running without usermode desktop manager support.
9867  */
9868 void dm_restore_drm_connector_state(struct drm_device *dev,
9869 				    struct drm_connector *connector)
9870 {
9871 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9872 	struct amdgpu_crtc *disconnected_acrtc;
9873 	struct dm_crtc_state *acrtc_state;
9874 
9875 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9876 		return;
9877 
9878 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9879 	if (!disconnected_acrtc)
9880 		return;
9881 
9882 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9883 	if (!acrtc_state->stream)
9884 		return;
9885 
9886 	/*
9887 	 * If the previous sink is not released and is different from the current
9888 	 * one, we deduce we are in a state where we cannot rely on a usermode
9889 	 * call to turn on the display, so we do it here.
9890 	 */
9891 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9892 		dm_force_atomic_commit(&aconnector->base);
9893 }
9894 
9895 /*
9896  * Grabs all modesetting locks to serialize against any blocking commits,
9897  * and waits for completion of all non-blocking commits.
9898  */
9899 static int do_aquire_global_lock(struct drm_device *dev,
9900 				 struct drm_atomic_state *state)
9901 {
9902 	struct drm_crtc *crtc;
9903 	struct drm_crtc_commit *commit;
9904 	long ret;
9905 
9906 	/*
9907 	 * Adding all modeset locks to acquire_ctx will
9908 	 * ensure that when the framework releases it, the
9909 	 * extra locks we are taking here will also get released.
9910 	 */
9911 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9912 	if (ret)
9913 		return ret;
9914 
9915 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9916 		spin_lock(&crtc->commit_lock);
9917 		commit = list_first_entry_or_null(&crtc->commit_list,
9918 				struct drm_crtc_commit, commit_entry);
9919 		if (commit)
9920 			drm_crtc_commit_get(commit);
9921 		spin_unlock(&crtc->commit_lock);
9922 
9923 		if (!commit)
9924 			continue;
9925 
9926 		/*
9927 		 * Make sure all pending HW programming has completed and
9928 		 * all page flips are done.
9929 		 */
9930 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9931 
9932 		if (ret > 0)
9933 			ret = wait_for_completion_interruptible_timeout(
9934 					&commit->flip_done, 10*HZ);
9935 
9936 		if (ret == 0)
9937 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9938 				  "timed out\n", crtc->base.id, crtc->name);
9939 
9940 		drm_crtc_commit_put(commit);
9941 	}
9942 
9943 	return ret < 0 ? ret : 0;
9944 }
9945 
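/*
 * Determine whether VRR is usable for the new CRTC/connector state and fill
 * in the freesync config (refresh range and VRR state) accordingly.
 */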
9946 static void get_freesync_config_for_crtc(
9947 	struct dm_crtc_state *new_crtc_state,
9948 	struct dm_connector_state *new_con_state)
9949 {
9950 	struct mod_freesync_config config = {0};
9951 	struct amdgpu_dm_connector *aconnector =
9952 			to_amdgpu_dm_connector(new_con_state->base.connector);
9953 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9954 	int vrefresh = drm_mode_vrefresh(mode);
9955 	bool fs_vid_mode = false;
9956 
9957 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9958 					vrefresh >= aconnector->min_vfreq &&
9959 					vrefresh <= aconnector->max_vfreq;
9960 
9961 	if (new_crtc_state->vrr_supported) {
9962 		new_crtc_state->stream->ignore_msa_timing_param = true;
9963 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9964 
9965 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9966 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9967 		config.vsif_supported = true;
9968 		config.btr = true;
9969 
9970 		if (fs_vid_mode) {
9971 			config.state = VRR_STATE_ACTIVE_FIXED;
9972 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9973 			goto out;
9974 		} else if (new_crtc_state->base.vrr_enabled) {
9975 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9976 		} else {
9977 			config.state = VRR_STATE_INACTIVE;
9978 		}
9979 	}
9980 out:
9981 	new_crtc_state->freesync_config = config;
9982 }
9983 
9984 static void reset_freesync_config_for_crtc(
9985 	struct dm_crtc_state *new_crtc_state)
9986 {
9987 	new_crtc_state->vrr_supported = false;
9988 
9989 	memset(&new_crtc_state->vrr_infopacket, 0,
9990 	       sizeof(new_crtc_state->vrr_infopacket));
9991 }
9992 
9993 static bool
9994 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9995 				 struct drm_crtc_state *new_crtc_state)
9996 {
9997 	struct drm_display_mode old_mode, new_mode;
9998 
9999 	if (!old_crtc_state || !new_crtc_state)
10000 		return false;
10001 
10002 	old_mode = old_crtc_state->mode;
10003 	new_mode = new_crtc_state->mode;
10004 
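	/*
	 * The timing is considered "unchanged" for freesync purposes when the
	 * two modes differ only in their vertical blanking (vtotal and vsync
	 * position) while the vsync pulse width and all horizontal parameters
	 * stay the same, i.e. the signature of a front porch adjustment used
	 * by freesync video modes.
	 */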
10005 	if (old_mode.clock       == new_mode.clock &&
10006 	    old_mode.hdisplay    == new_mode.hdisplay &&
10007 	    old_mode.vdisplay    == new_mode.vdisplay &&
10008 	    old_mode.htotal      == new_mode.htotal &&
10009 	    old_mode.vtotal      != new_mode.vtotal &&
10010 	    old_mode.hsync_start == new_mode.hsync_start &&
10011 	    old_mode.vsync_start != new_mode.vsync_start &&
10012 	    old_mode.hsync_end   == new_mode.hsync_end &&
10013 	    old_mode.vsync_end   != new_mode.vsync_end &&
10014 	    old_mode.hskew       == new_mode.hskew &&
10015 	    old_mode.vscan       == new_mode.vscan &&
10016 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10017 	    (new_mode.vsync_end - new_mode.vsync_start))
10018 		return true;
10019 
10020 	return false;
10021 }
10022 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10024 	uint64_t num, den, res;
10025 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10026 
10027 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10028 
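	/*
	 * mode.clock is in kHz: scale it to Hz and then to uHz before dividing
	 * by the number of pixels per frame (htotal * vtotal) to obtain the
	 * fixed refresh rate in uHz.
	 */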
10029 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10030 	den = (unsigned long long)new_crtc_state->mode.htotal *
10031 	      (unsigned long long)new_crtc_state->mode.vtotal;
10032 
10033 	res = div_u64(num, den);
10034 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10035 }
10036 
10037 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10038 				struct drm_atomic_state *state,
10039 				struct drm_crtc *crtc,
10040 				struct drm_crtc_state *old_crtc_state,
10041 				struct drm_crtc_state *new_crtc_state,
10042 				bool enable,
10043 				bool *lock_and_validation_needed)
10044 {
10045 	struct dm_atomic_state *dm_state = NULL;
10046 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10047 	struct dc_stream_state *new_stream;
10048 	int ret = 0;
10049 
10050 	/*
10051 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10052 	 * update changed items
10053 	 */
10054 	struct amdgpu_crtc *acrtc = NULL;
10055 	struct amdgpu_dm_connector *aconnector = NULL;
10056 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10057 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10058 
10059 	new_stream = NULL;
10060 
10061 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10062 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10063 	acrtc = to_amdgpu_crtc(crtc);
10064 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10065 
10066 	/* TODO This hack should go away */
10067 	if (aconnector && enable) {
10068 		/* Make sure fake sink is created in plug-in scenario */
10069 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10070 							    &aconnector->base);
10071 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10072 							    &aconnector->base);
10073 
10074 		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR(drm_new_conn_state);
10076 			goto fail;
10077 		}
10078 
10079 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10080 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10081 
10082 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10083 			goto skip_modeset;
10084 
10085 		new_stream = create_validate_stream_for_sink(aconnector,
10086 							     &new_crtc_state->mode,
10087 							     dm_new_conn_state,
10088 							     dm_old_crtc_state->stream);
10089 
10090 		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error. The OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
10095 		 */
10096 
10097 		if (!new_stream) {
10098 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10099 					__func__, acrtc->base.base.id);
10100 			ret = -ENOMEM;
10101 			goto fail;
10102 		}
10103 
10104 		/*
10105 		 * TODO: Check VSDB bits to decide whether this should
10106 		 * be enabled or not.
10107 		 */
10108 		new_stream->triggered_crtc_reset.enabled =
10109 			dm->force_timing_sync;
10110 
10111 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10112 
10113 		ret = fill_hdr_info_packet(drm_new_conn_state,
10114 					   &new_stream->hdr_static_metadata);
10115 		if (ret)
10116 			goto fail;
10117 
10118 		/*
10119 		 * If we already removed the old stream from the context
10120 		 * (and set the new stream to NULL) then we can't reuse
10121 		 * the old stream even if the stream and scaling are unchanged.
10122 		 * We'll hit the BUG_ON and black screen.
10123 		 *
10124 		 * TODO: Refactor this function to allow this check to work
10125 		 * in all conditions.
10126 		 */
10127 		if (amdgpu_freesync_vid_mode &&
10128 		    dm_new_crtc_state->stream &&
10129 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10130 			goto skip_modeset;
10131 
10132 		if (dm_new_crtc_state->stream &&
10133 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10134 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10135 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10137 					 new_crtc_state->mode_changed);
10138 		}
10139 	}
10140 
10141 	/* mode_changed flag may get updated above, need to check again */
10142 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10143 		goto skip_modeset;
10144 
10145 	DRM_DEBUG_ATOMIC(
10146 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10147 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10148 		"connectors_changed:%d\n",
10149 		acrtc->crtc_id,
10150 		new_crtc_state->enable,
10151 		new_crtc_state->active,
10152 		new_crtc_state->planes_changed,
10153 		new_crtc_state->mode_changed,
10154 		new_crtc_state->active_changed,
10155 		new_crtc_state->connectors_changed);
10156 
10157 	/* Remove stream for any changed/disabled CRTC */
10158 	if (!enable) {
10159 
10160 		if (!dm_old_crtc_state->stream)
10161 			goto skip_modeset;
10162 
10163 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10164 		    is_timing_unchanged_for_freesync(new_crtc_state,
10165 						     old_crtc_state)) {
10166 			new_crtc_state->mode_changed = false;
10167 			DRM_DEBUG_DRIVER(
10168 				"Mode change not required for front porch change, "
				"setting mode_changed to %d\n",
10170 				new_crtc_state->mode_changed);
10171 
10172 			set_freesync_fixed_config(dm_new_crtc_state);
10173 
10174 			goto skip_modeset;
10175 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10176 			   is_freesync_video_mode(&new_crtc_state->mode,
10177 						  aconnector)) {
10178 			struct drm_display_mode *high_mode;
10179 
10180 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10181 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10182 				set_freesync_fixed_config(dm_new_crtc_state);
10183 			}
10184 		}
10185 
10186 		ret = dm_atomic_get_state(state, &dm_state);
10187 		if (ret)
10188 			goto fail;
10189 
10190 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10191 				crtc->base.id);
10192 
10193 		/* i.e. reset mode */
10194 		if (dc_remove_stream_from_ctx(
10195 				dm->dc,
10196 				dm_state->context,
10197 				dm_old_crtc_state->stream) != DC_OK) {
10198 			ret = -EINVAL;
10199 			goto fail;
10200 		}
10201 
10202 		dc_stream_release(dm_old_crtc_state->stream);
10203 		dm_new_crtc_state->stream = NULL;
10204 
10205 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10206 
10207 		*lock_and_validation_needed = true;
10208 
10209 	} else {/* Add stream for any updated/enabled CRTC */
10210 		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when added MST connectors are not found in the existing
		 * crtc_state in chained (daisy-chain) mode.
		 * TODO: need to dig out the root cause of that.
10214 		 */
10215 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10216 			goto skip_modeset;
10217 
10218 		if (modereset_required(new_crtc_state))
10219 			goto skip_modeset;
10220 
10221 		if (modeset_required(new_crtc_state, new_stream,
10222 				     dm_old_crtc_state->stream)) {
10223 
10224 			WARN_ON(dm_new_crtc_state->stream);
10225 
10226 			ret = dm_atomic_get_state(state, &dm_state);
10227 			if (ret)
10228 				goto fail;
10229 
10230 			dm_new_crtc_state->stream = new_stream;
10231 
10232 			dc_stream_retain(new_stream);
10233 
10234 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10235 					 crtc->base.id);
10236 
10237 			if (dc_add_stream_to_ctx(
10238 					dm->dc,
10239 					dm_state->context,
10240 					dm_new_crtc_state->stream) != DC_OK) {
10241 				ret = -EINVAL;
10242 				goto fail;
10243 			}
10244 
10245 			*lock_and_validation_needed = true;
10246 		}
10247 	}
10248 
10249 skip_modeset:
10250 	/* Release extra reference */
10251 	if (new_stream)
10252 		 dc_stream_release(new_stream);
10253 
10254 	/*
10255 	 * We want to do dc stream updates that do not require a
10256 	 * full modeset below.
10257 	 */
10258 	if (!(enable && aconnector && new_crtc_state->active))
10259 		return 0;
10260 	/*
10261 	 * Given above conditions, the dc state cannot be NULL because:
10262 	 * 1. We're in the process of enabling CRTCs (just been added
10263 	 *    to the dc context, or already is on the context)
10264 	 * 2. Has a valid connector attached, and
10265 	 * 3. Is currently active and enabled.
10266 	 * => The dc stream state currently exists.
10267 	 */
10268 	BUG_ON(dm_new_crtc_state->stream == NULL);
10269 
10270 	/* Scaling or underscan settings */
10271 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10272 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10273 		update_stream_scaling_settings(
10274 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10275 
10276 	/* ABM settings */
10277 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10278 
10279 	/*
10280 	 * Color management settings. We also update color properties
10281 	 * when a modeset is needed, to ensure it gets reprogrammed.
10282 	 */
10283 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10284 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10285 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10286 		if (ret)
10287 			goto fail;
10288 	}
10289 
10290 	/* Update Freesync settings. */
10291 	get_freesync_config_for_crtc(dm_new_crtc_state,
10292 				     dm_new_conn_state);
10293 
10294 	return ret;
10295 
10296 fail:
10297 	if (new_stream)
10298 		dc_stream_release(new_stream);
10299 	return ret;
10300 }
10301 
10302 static bool should_reset_plane(struct drm_atomic_state *state,
10303 			       struct drm_plane *plane,
10304 			       struct drm_plane_state *old_plane_state,
10305 			       struct drm_plane_state *new_plane_state)
10306 {
10307 	struct drm_plane *other;
10308 	struct drm_plane_state *old_other_state, *new_other_state;
10309 	struct drm_crtc_state *new_crtc_state;
10310 	int i;
10311 
10312 	/*
10313 	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
10315 	 * the stream.
10316 	 */
10317 	if (state->allow_modeset)
10318 		return true;
10319 
10320 	/* Exit early if we know that we're adding or removing the plane. */
10321 	if (old_plane_state->crtc != new_plane_state->crtc)
10322 		return true;
10323 
10324 	/* old crtc == new_crtc == NULL, plane not in context. */
10325 	if (!new_plane_state->crtc)
10326 		return false;
10327 
10328 	new_crtc_state =
10329 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10330 
10331 	if (!new_crtc_state)
10332 		return true;
10333 
10334 	/* CRTC Degamma changes currently require us to recreate planes. */
10335 	if (new_crtc_state->color_mgmt_changed)
10336 		return true;
10337 
10338 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10339 		return true;
10340 
10341 	/*
10342 	 * If there are any new primary or overlay planes being added or
10343 	 * removed then the z-order can potentially change. To ensure
10344 	 * correct z-order and pipe acquisition the current DC architecture
10345 	 * requires us to remove and recreate all existing planes.
10346 	 *
10347 	 * TODO: Come up with a more elegant solution for this.
10348 	 */
10349 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10352 			continue;
10353 
10354 		if (old_other_state->crtc != new_plane_state->crtc &&
10355 		    new_other_state->crtc != new_plane_state->crtc)
10356 			continue;
10357 
10358 		if (old_other_state->crtc != new_other_state->crtc)
10359 			return true;
10360 
10361 		/* Src/dst size and scaling updates. */
10362 		if (old_other_state->src_w != new_other_state->src_w ||
10363 		    old_other_state->src_h != new_other_state->src_h ||
10364 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10365 		    old_other_state->crtc_h != new_other_state->crtc_h)
10366 			return true;
10367 
10368 		/* Rotation / mirroring updates. */
10369 		if (old_other_state->rotation != new_other_state->rotation)
10370 			return true;
10371 
10372 		/* Blending updates. */
10373 		if (old_other_state->pixel_blend_mode !=
10374 		    new_other_state->pixel_blend_mode)
10375 			return true;
10376 
10377 		/* Alpha updates. */
10378 		if (old_other_state->alpha != new_other_state->alpha)
10379 			return true;
10380 
10381 		/* Colorspace changes. */
10382 		if (old_other_state->color_range != new_other_state->color_range ||
10383 		    old_other_state->color_encoding != new_other_state->color_encoding)
10384 			return true;
10385 
10386 		/* Framebuffer checks fall at the end. */
10387 		if (!old_other_state->fb || !new_other_state->fb)
10388 			continue;
10389 
10390 		/* Pixel format changes can require bandwidth updates. */
10391 		if (old_other_state->fb->format != new_other_state->fb->format)
10392 			return true;
10393 
10394 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10395 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10396 
10397 		/* Tiling and DCC changes also require bandwidth updates. */
10398 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10399 		    old_afb->base.modifier != new_afb->base.modifier)
10400 			return true;
10401 	}
10402 
10403 	return false;
10404 }
10405 
10406 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10407 			      struct drm_plane_state *new_plane_state,
10408 			      struct drm_framebuffer *fb)
10409 {
10410 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10411 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10412 	unsigned int pitch;
10413 	bool linear;
10414 
10415 	if (fb->width > new_acrtc->max_cursor_width ||
10416 	    fb->height > new_acrtc->max_cursor_height) {
10417 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 fb->width, fb->height);
10420 		return -EINVAL;
10421 	}
10422 	if (new_plane_state->src_w != fb->width << 16 ||
10423 	    new_plane_state->src_h != fb->height << 16) {
10424 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10425 		return -EINVAL;
10426 	}
10427 
10428 	/* Pitch in pixels */
10429 	pitch = fb->pitches[0] / fb->format->cpp[0];
10430 
10431 	if (fb->width != pitch) {
10432 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10433 				 fb->width, pitch);
10434 		return -EINVAL;
10435 	}
10436 
10437 	switch (pitch) {
10438 	case 64:
10439 	case 128:
10440 	case 256:
10441 		/* FB pitch is supported by cursor plane */
10442 		break;
10443 	default:
10444 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10445 		return -EINVAL;
10446 	}
10447 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10450 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10451 		if (adev->family < AMDGPU_FAMILY_AI) {
10452 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10453 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10454 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10455 		} else {
10456 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10457 		}
10458 		if (!linear) {
10459 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10460 			return -EINVAL;
10461 		}
10462 	}
10463 
10464 	return 0;
10465 }
10466 
10467 static int dm_update_plane_state(struct dc *dc,
10468 				 struct drm_atomic_state *state,
10469 				 struct drm_plane *plane,
10470 				 struct drm_plane_state *old_plane_state,
10471 				 struct drm_plane_state *new_plane_state,
10472 				 bool enable,
10473 				 bool *lock_and_validation_needed)
10474 {
10475 
10476 	struct dm_atomic_state *dm_state = NULL;
10477 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10478 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10479 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10480 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10481 	struct amdgpu_crtc *new_acrtc;
10482 	bool needs_reset;
10483 	int ret = 0;
10484 
10486 	new_plane_crtc = new_plane_state->crtc;
10487 	old_plane_crtc = old_plane_state->crtc;
10488 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10489 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10490 
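	/*
	 * Cursor planes are not backed by a DC plane state; the cursor is
	 * programmed through the stream at commit time, so only basic
	 * position and framebuffer checks are done here.
	 */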
10491 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10492 		if (!enable || !new_plane_crtc ||
10493 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10494 			return 0;
10495 
10496 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10497 
10498 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10499 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10500 			return -EINVAL;
10501 		}
10502 
10503 		if (new_plane_state->fb) {
10504 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10505 						 new_plane_state->fb);
10506 			if (ret)
10507 				return ret;
10508 		}
10509 
10510 		return 0;
10511 	}
10512 
10513 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10514 					 new_plane_state);
10515 
10516 	/* Remove any changed/removed planes */
10517 	if (!enable) {
10518 		if (!needs_reset)
10519 			return 0;
10520 
10521 		if (!old_plane_crtc)
10522 			return 0;
10523 
10524 		old_crtc_state = drm_atomic_get_old_crtc_state(
10525 				state, old_plane_crtc);
10526 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10527 
10528 		if (!dm_old_crtc_state->stream)
10529 			return 0;
10530 
10531 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10532 				plane->base.id, old_plane_crtc->base.id);
10533 
10534 		ret = dm_atomic_get_state(state, &dm_state);
10535 		if (ret)
10536 			return ret;
10537 
10538 		if (!dc_remove_plane_from_context(
10539 				dc,
10540 				dm_old_crtc_state->stream,
10541 				dm_old_plane_state->dc_state,
10542 				dm_state->context)) {
10543 
10544 			return -EINVAL;
10545 		}
10546 
10548 		dc_plane_state_release(dm_old_plane_state->dc_state);
10549 		dm_new_plane_state->dc_state = NULL;
10550 
10551 		*lock_and_validation_needed = true;
10552 
10553 	} else { /* Add new planes */
10554 		struct dc_plane_state *dc_new_plane_state;
10555 
10556 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10557 			return 0;
10558 
10559 		if (!new_plane_crtc)
10560 			return 0;
10561 
10562 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10563 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10564 
10565 		if (!dm_new_crtc_state->stream)
10566 			return 0;
10567 
10568 		if (!needs_reset)
10569 			return 0;
10570 
10571 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10572 		if (ret)
10573 			return ret;
10574 
10575 		WARN_ON(dm_new_plane_state->dc_state);
10576 
10577 		dc_new_plane_state = dc_create_plane_state(dc);
10578 		if (!dc_new_plane_state)
10579 			return -ENOMEM;
10580 
10581 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10582 				 plane->base.id, new_plane_crtc->base.id);
10583 
10584 		ret = fill_dc_plane_attributes(
10585 			drm_to_adev(new_plane_crtc->dev),
10586 			dc_new_plane_state,
10587 			new_plane_state,
10588 			new_crtc_state);
10589 		if (ret) {
10590 			dc_plane_state_release(dc_new_plane_state);
10591 			return ret;
10592 		}
10593 
10594 		ret = dm_atomic_get_state(state, &dm_state);
10595 		if (ret) {
10596 			dc_plane_state_release(dc_new_plane_state);
10597 			return ret;
10598 		}
10599 
10600 		/*
10601 		 * Any atomic check errors that occur after this will
10602 		 * not need a release. The plane state will be attached
10603 		 * to the stream, and therefore part of the atomic
10604 		 * state. It'll be released when the atomic state is
10605 		 * cleaned.
10606 		 */
10607 		if (!dc_add_plane_to_context(
10608 				dc,
10609 				dm_new_crtc_state->stream,
10610 				dc_new_plane_state,
10611 				dm_state->context)) {
10612 
10613 			dc_plane_state_release(dc_new_plane_state);
10614 			return -EINVAL;
10615 		}
10616 
10617 		dm_new_plane_state->dc_state = dc_new_plane_state;
10618 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
10622 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10623 
10624 		*lock_and_validation_needed = true;
10625 	}
10626 
10628 	return ret;
10629 }
10630 
10631 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10632 				struct drm_crtc *crtc,
10633 				struct drm_crtc_state *new_crtc_state)
10634 {
10635 	struct drm_plane *cursor = crtc->cursor, *underlying;
10636 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10637 	int i;
10638 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10639 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the underlying planes'.
	 */
10644 
10645 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;
10649 
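	/*
	 * Scale factors below are expressed in 1/1000th units; src_w/h are
	 * 16.16 fixed point, so shift them down to whole pixels first.
	 */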
10650 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10651 			 (new_cursor_state->src_w >> 16);
10652 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10653 			 (new_cursor_state->src_h >> 16);
10654 
10655 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10656 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10657 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10658 			continue;
10659 
10660 		/* Ignore disabled planes */
10661 		if (!new_underlying_state->fb)
10662 			continue;
10663 
10664 		underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10665 				     (new_underlying_state->src_w >> 16);
10666 		underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10667 				     (new_underlying_state->src_h >> 16);
10668 
10669 		if (cursor_scale_w != underlying_scale_w ||
10670 		    cursor_scale_h != underlying_scale_h) {
10671 			drm_dbg_atomic(crtc->dev,
10672 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10673 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10674 			return -EINVAL;
10675 		}
10676 
10677 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10678 		if (new_underlying_state->crtc_x <= 0 &&
10679 		    new_underlying_state->crtc_y <= 0 &&
10680 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10681 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10682 			break;
10683 	}
10684 
10685 	return 0;
10686 }
10687 
10688 #if defined(CONFIG_DRM_AMD_DC_DCN)
10689 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10690 {
10691 	struct drm_connector *connector;
10692 	struct drm_connector_state *conn_state;
10693 	struct amdgpu_dm_connector *aconnector = NULL;
10694 	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
10696 		if (conn_state->crtc != crtc)
10697 			continue;
10698 
10699 		aconnector = to_amdgpu_dm_connector(connector);
10700 		if (!aconnector->port || !aconnector->mst_port)
10701 			aconnector = NULL;
10702 		else
10703 			break;
10704 	}
10705 
10706 	if (!aconnector)
10707 		return 0;
10708 
10709 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10710 }
10711 #endif
10712 
10713 /**
10714  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10715  * @dev: The DRM device
10716  * @state: The atomic state to commit
10717  *
10718  * Validate that the given atomic state is programmable by DC into hardware.
10719  * This involves constructing a &struct dc_state reflecting the new hardware
10720  * state we wish to commit, then querying DC to see if it is programmable. It's
10721  * important not to modify the existing DC state. Otherwise, atomic_check
10722  * may unexpectedly commit hardware changes.
10723  *
10724  * When validating the DC state, it's important that the right locks are
10725  * acquired. For full updates case which removes/adds/updates streams on one
10726  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10727  * that any such full update commit will wait for completion of any outstanding
10728  * flip using DRMs synchronization events.
10729  *
 * Note that DM adds the affected connectors for all CRTCs in state, even when that
10731  * might not seem necessary. This is because DC stream creation requires the
10732  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10733  * be possible but non-trivial - a possible TODO item.
10734  *
10735  * Return: -Error code if validation failed.
 * Return: 0 on success, or a negative error code if validation failed.
10737 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10738 				  struct drm_atomic_state *state)
10739 {
10740 	struct amdgpu_device *adev = drm_to_adev(dev);
10741 	struct dm_atomic_state *dm_state = NULL;
10742 	struct dc *dc = adev->dm.dc;
10743 	struct drm_connector *connector;
10744 	struct drm_connector_state *old_con_state, *new_con_state;
10745 	struct drm_crtc *crtc;
10746 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10747 	struct drm_plane *plane;
10748 	struct drm_plane_state *old_plane_state, *new_plane_state;
10749 	enum dc_status status;
10750 	int ret, i;
10751 	bool lock_and_validation_needed = false;
10752 	struct dm_crtc_state *dm_old_crtc_state;
10753 #if defined(CONFIG_DRM_AMD_DC_DCN)
10754 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10755 	struct drm_dp_mst_topology_state *mst_state;
10756 	struct drm_dp_mst_topology_mgr *mgr;
10757 #endif
10758 
10759 	trace_amdgpu_dm_atomic_check_begin(state);
10760 
10761 	ret = drm_atomic_helper_check_modeset(dev, state);
10762 	if (ret)
10763 		goto fail;
10764 
10765 	/* Check connector changes */
10766 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10767 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10768 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10769 
10770 		/* Skip connectors that are disabled or part of modeset already. */
10771 		if (!old_con_state->crtc && !new_con_state->crtc)
10772 			continue;
10773 
10774 		if (!new_con_state->crtc)
10775 			continue;
10776 
10777 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10778 		if (IS_ERR(new_crtc_state)) {
10779 			ret = PTR_ERR(new_crtc_state);
10780 			goto fail;
10781 		}
10782 
10783 		if (dm_old_con_state->abm_level !=
10784 		    dm_new_con_state->abm_level)
10785 			new_crtc_state->connectors_changed = true;
10786 	}
10787 
10788 #if defined(CONFIG_DRM_AMD_DC_DCN)
10789 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10790 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10791 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10792 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10793 				if (ret)
10794 					goto fail;
10795 			}
10796 		}
10797 	}
10798 #endif
10799 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10800 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10801 
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
10806 			continue;
10807 
10808 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10809 		if (ret)
10810 			goto fail;
10811 
10812 		if (!new_crtc_state->enable)
10813 			continue;
10814 
10815 		ret = drm_atomic_add_affected_connectors(state, crtc);
10816 		if (ret)
10817 			goto fail;
10818 
10819 		ret = drm_atomic_add_affected_planes(state, crtc);
10820 		if (ret)
10821 			goto fail;
10822 
10823 		if (dm_old_crtc_state->dsc_force_changed)
10824 			new_crtc_state->mode_changed = true;
10825 	}
10826 
10827 	/*
10828 	 * Add all primary and overlay planes on the CRTC to the state
10829 	 * whenever a plane is enabled to maintain correct z-ordering
10830 	 * and to enable fast surface updates.
10831 	 */
10832 	drm_for_each_crtc(crtc, dev) {
10833 		bool modified = false;
10834 
10835 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10836 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10837 				continue;
10838 
10839 			if (new_plane_state->crtc == crtc ||
10840 			    old_plane_state->crtc == crtc) {
10841 				modified = true;
10842 				break;
10843 			}
10844 		}
10845 
10846 		if (!modified)
10847 			continue;
10848 
10849 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10850 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10851 				continue;
10852 
10853 			new_plane_state =
10854 				drm_atomic_get_plane_state(state, plane);
10855 
10856 			if (IS_ERR(new_plane_state)) {
10857 				ret = PTR_ERR(new_plane_state);
10858 				goto fail;
10859 			}
10860 		}
10861 	}
10862 
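	/*
	 * Removals below are processed before additions (planes are removed
	 * and CRTCs disabled before CRTCs are enabled and planes added) so
	 * that resources freed in the DC context can be reacquired by the
	 * CRTCs and planes being enabled.
	 */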
	/* Remove existing planes if they are modified */
10864 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10865 		ret = dm_update_plane_state(dc, state, plane,
10866 					    old_plane_state,
10867 					    new_plane_state,
10868 					    false,
10869 					    &lock_and_validation_needed);
10870 		if (ret)
10871 			goto fail;
10872 	}
10873 
	/* Disable all CRTCs that require disabling */
10875 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10876 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10877 					   old_crtc_state,
10878 					   new_crtc_state,
10879 					   false,
10880 					   &lock_and_validation_needed);
10881 		if (ret)
10882 			goto fail;
10883 	}
10884 
	/* Enable all CRTCs that require enabling */
10886 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10887 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10888 					   old_crtc_state,
10889 					   new_crtc_state,
10890 					   true,
10891 					   &lock_and_validation_needed);
10892 		if (ret)
10893 			goto fail;
10894 	}
10895 
10896 	/* Add new/modified planes */
10897 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10898 		ret = dm_update_plane_state(dc, state, plane,
10899 					    old_plane_state,
10900 					    new_plane_state,
10901 					    true,
10902 					    &lock_and_validation_needed);
10903 		if (ret)
10904 			goto fail;
10905 	}
10906 
10907 	/* Run this here since we want to validate the streams we created */
10908 	ret = drm_atomic_helper_check_planes(dev, state);
10909 	if (ret)
10910 		goto fail;
10911 
	/* Check cursor plane scaling */
10913 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10914 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10915 		if (ret)
10916 			goto fail;
10917 	}
10918 
10919 	if (state->legacy_cursor_update) {
10920 		/*
10921 		 * This is a fast cursor update coming from the plane update
10922 		 * helper, check if it can be done asynchronously for better
10923 		 * performance.
10924 		 */
10925 		state->async_update =
10926 			!drm_atomic_helper_async_check(dev, state);
10927 
10928 		/*
10929 		 * Skip the remaining global validation if this is an async
10930 		 * update. Cursor updates can be done without affecting
10931 		 * state or bandwidth calcs and this avoids the performance
10932 		 * penalty of locking the private state object and
10933 		 * allocating a new dc_state.
10934 		 */
10935 		if (state->async_update)
10936 			return 0;
10937 	}
10938 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context without causing a full reset. Need to
	 * decide how to handle this.
	 */
10944 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10945 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10946 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10947 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10948 
10949 		/* Skip any modesets/resets */
10950 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10951 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10952 			continue;
10953 
		/* Skip anything that is not a scaling or underscan change */
10955 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10956 			continue;
10957 
10958 		lock_and_validation_needed = true;
10959 	}
10960 
10961 #if defined(CONFIG_DRM_AMD_DC_DCN)
10962 	/* set the slot info for each mst_state based on the link encoding format */
10963 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10964 		struct amdgpu_dm_connector *aconnector;
10965 		struct drm_connector *connector;
10966 		struct drm_connector_list_iter iter;
10967 		u8 link_coding_cap;
10968 
		if (!mgr->mst_state)
10970 			continue;
10971 
10972 		drm_connector_list_iter_begin(dev, &iter);
10973 		drm_for_each_connector_iter(connector, &iter) {
10974 			int id = connector->index;
10975 
10976 			if (id == mst_state->mgr->conn_base_id) {
10977 				aconnector = to_amdgpu_dm_connector(connector);
10978 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10979 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
10980 
10981 				break;
10982 			}
10983 		}
		drm_connector_list_iter_end(&iter);
	}
10987 #endif
	/*
10989 	 * Streams and planes are reset when there are changes that affect
10990 	 * bandwidth. Anything that affects bandwidth needs to go through
10991 	 * DC global validation to ensure that the configuration can be applied
10992 	 * to hardware.
10993 	 *
10994 	 * We have to currently stall out here in atomic_check for outstanding
10995 	 * commits to finish in this case because our IRQ handlers reference
10996 	 * DRM state directly - we can end up disabling interrupts too early
10997 	 * if we don't.
10998 	 *
10999 	 * TODO: Remove this stall and drop DM state private objects.
11000 	 */
11001 	if (lock_and_validation_needed) {
11002 		ret = dm_atomic_get_state(state, &dm_state);
11003 		if (ret)
11004 			goto fail;
11005 
11006 		ret = do_aquire_global_lock(dev, state);
11007 		if (ret)
11008 			goto fail;
11009 
11010 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			ret = -EINVAL;
			goto fail;
		}
11013 
11014 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11015 		if (ret)
11016 			goto fail;
11017 #endif
11018 
11019 		/*
11020 		 * Perform validation of MST topology in the state:
11021 		 * We need to perform MST atomic check before calling
11022 		 * dc_validate_global_state(), or there is a chance
11023 		 * to get stuck in an infinite loop and hang eventually.
11024 		 */
11025 		ret = drm_dp_mst_atomic_check(state);
11026 		if (ret)
11027 			goto fail;
11028 		status = dc_validate_global_state(dc, dm_state->context, false);
11029 		if (status != DC_OK) {
11030 			drm_dbg_atomic(dev,
				       "DC global validation failure: %s (%d)\n",
11032 				       dc_status_to_str(status), status);
11033 			ret = -EINVAL;
11034 			goto fail;
11035 		}
11036 	} else {
11037 		/*
11038 		 * The commit is a fast update. Fast updates shouldn't change
11039 		 * the DC context, affect global validation, and can have their
11040 		 * commit work done in parallel with other commits not touching
11041 		 * the same resource. If we have a new DC context as part of
11042 		 * the DM atomic state from validation we need to free it and
11043 		 * retain the existing one instead.
11044 		 *
11045 		 * Furthermore, since the DM atomic state only contains the DC
11046 		 * context and can safely be annulled, we can free the state
11047 		 * and clear the associated private object now to free
11048 		 * some memory and avoid a possible use-after-free later.
11049 		 */
11050 
11051 		for (i = 0; i < state->num_private_objs; i++) {
11052 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11053 
11054 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11055 				int j = state->num_private_objs-1;
11056 
11057 				dm_atomic_destroy_state(obj,
11058 						state->private_objs[i].state);
11059 
11060 				/* If i is not at the end of the array then the
11061 				 * last element needs to be moved to where i was
11062 				 * before the array can safely be truncated.
11063 				 */
11064 				if (i != j)
11065 					state->private_objs[i] =
11066 						state->private_objs[j];
11067 
11068 				state->private_objs[j].ptr = NULL;
11069 				state->private_objs[j].state = NULL;
11070 				state->private_objs[j].old_state = NULL;
11071 				state->private_objs[j].new_state = NULL;
11072 
11073 				state->num_private_objs = j;
11074 				break;
11075 			}
11076 		}
11077 	}
11078 
11079 	/* Store the overall update type for use later in atomic check. */
11080 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11081 		struct dm_crtc_state *dm_new_crtc_state =
11082 			to_dm_crtc_state(new_crtc_state);
11083 
11084 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11085 							 UPDATE_TYPE_FULL :
11086 							 UPDATE_TYPE_FAST;
11087 	}
11088 
11089 	/* Must be success */
11090 	WARN_ON(ret);
11091 
11092 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11093 
11094 	return ret;
11095 
11096 fail:
11097 	if (ret == -EDEADLK)
11098 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11099 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11100 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11101 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11103 
11104 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11105 
11106 	return ret;
11107 }
11108 
11109 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11110 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11111 {
11112 	uint8_t dpcd_data;
11113 	bool capable = false;
11114 
11115 	if (amdgpu_dm_connector->dc_link &&
11116 		dm_helpers_dp_read_dpcd(
11117 				NULL,
11118 				amdgpu_dm_connector->dc_link,
11119 				DP_DOWN_STREAM_PORT_COUNT,
11120 				&dpcd_data,
11121 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11123 	}
11124 
11125 	return capable;
11126 }
11127 
11128 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11129 		unsigned int offset,
11130 		unsigned int total_length,
11131 		uint8_t *data,
11132 		unsigned int length,
11133 		struct amdgpu_hdmi_vsdb_info *vsdb)
11134 {
11135 	bool res;
11136 	union dmub_rb_cmd cmd;
11137 	struct dmub_cmd_send_edid_cea *input;
11138 	struct dmub_cmd_edid_cea_output *output;
11139 
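	/* The EDID CEA command can only carry a limited chunk of data per call. */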
11140 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11141 		return false;
11142 
11143 	memset(&cmd, 0, sizeof(cmd));
11144 
11145 	input = &cmd.edid_cea.data.input;
11146 
11147 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11148 	cmd.edid_cea.header.sub_type = 0;
11149 	cmd.edid_cea.header.payload_bytes =
11150 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11151 	input->offset = offset;
11152 	input->length = length;
11153 	input->total_length = total_length;
11154 	memcpy(input->payload, data, length);
11155 
11156 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11157 	if (!res) {
11158 		DRM_ERROR("EDID CEA parser failed\n");
11159 		return false;
11160 	}
11161 
11162 	output = &cmd.edid_cea.data.output;
11163 
11164 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11165 		if (!output->ack.success) {
11166 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11167 					output->ack.offset);
11168 		}
11169 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11170 		if (!output->amd_vsdb.vsdb_found)
11171 			return false;
11172 
11173 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11174 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11175 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11176 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11177 	} else {
11178 		DRM_WARN("Unknown EDID CEA parser results\n");
11179 		return false;
11180 	}
11181 
11182 	return true;
11183 }
11184 
11185 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11186 		uint8_t *edid_ext, int len,
11187 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11188 {
11189 	int i;
11190 
11191 	/* send extension block to DMCU for parsing */
11192 	for (i = 0; i < len; i += 8) {
11193 		bool res;
11194 		int offset;
11195 
		/* send 8 bytes at a time */
11197 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11198 			return false;
11199 
		if (i + 8 == len) {
			/* Entire EDID block has been sent; expect a result */
11202 			int version, min_rate, max_rate;
11203 
11204 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11205 			if (res) {
11206 				/* amd vsdb found */
11207 				vsdb_info->freesync_supported = 1;
11208 				vsdb_info->amd_vsdb_version = version;
11209 				vsdb_info->min_refresh_rate_hz = min_rate;
11210 				vsdb_info->max_refresh_rate_hz = max_rate;
11211 				return true;
11212 			}
11213 			/* not amd vsdb */
11214 			return false;
11215 		}
11216 
		/* check for ack */
11218 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11219 		if (!res)
11220 			return false;
11221 	}
11222 
11223 	return false;
11224 }
11225 
11226 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11227 		uint8_t *edid_ext, int len,
11228 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11229 {
11230 	int i;
11231 
	/* send extension block to DMUB for parsing */
11233 	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
11235 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11236 			return false;
11237 	}
11238 
11239 	return vsdb_info->freesync_supported;
11240 }
11241 
11242 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11243 		uint8_t *edid_ext, int len,
11244 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11245 {
11246 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11247 
11248 	if (adev->dm.dmub_srv)
11249 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11250 	else
11251 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11252 }
11253 
11254 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11255 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11256 {
11257 	uint8_t *edid_ext = NULL;
11258 	int i;
11259 	bool valid_vsdb_found = false;
11260 
11261 	/*----- drm_find_cea_extension() -----*/
11262 	/* No EDID or EDID extensions */
11263 	if (edid == NULL || edid->extensions == 0)
11264 		return -ENODEV;
11265 
11266 	/* Find CEA extension */
11267 	for (i = 0; i < edid->extensions; i++) {
11268 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11269 		if (edid_ext[0] == CEA_EXT)
11270 			break;
11271 	}
11272 
11273 	if (i == edid->extensions)
11274 		return -ENODEV;
11275 
11276 	/*----- cea_db_offsets() -----*/
11277 	if (edid_ext[0] != CEA_EXT)
11278 		return -ENODEV;
11279 
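	/* Hand the whole 128-byte CEA extension block to the CEA parser. */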
11280 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11281 
11282 	return valid_vsdb_found ? i : -ENODEV;
11283 }
11284 
11285 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11286 					struct edid *edid)
11287 {
11288 	int i = 0;
11289 	struct detailed_timing *timing;
11290 	struct detailed_non_pixel *data;
11291 	struct detailed_data_monitor_range *range;
11292 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11293 			to_amdgpu_dm_connector(connector);
11294 	struct dm_connector_state *dm_con_state = NULL;
11295 	struct dc_sink *sink;
11296 
11297 	struct drm_device *dev = connector->dev;
11298 	struct amdgpu_device *adev = drm_to_adev(dev);
11299 	bool freesync_capable = false;
11300 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11301 
11302 	if (!connector->state) {
11303 		DRM_ERROR("%s - Connector has no state", __func__);
11304 		goto update;
11305 	}
11306 
11307 	sink = amdgpu_dm_connector->dc_sink ?
11308 		amdgpu_dm_connector->dc_sink :
11309 		amdgpu_dm_connector->dc_em_sink;
11310 
11311 	if (!edid || !sink) {
11312 		dm_con_state = to_dm_connector_state(connector->state);
11313 
11314 		amdgpu_dm_connector->min_vfreq = 0;
11315 		amdgpu_dm_connector->max_vfreq = 0;
11316 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11317 		connector->display_info.monitor_range.min_vfreq = 0;
11318 		connector->display_info.monitor_range.max_vfreq = 0;
11319 		freesync_capable = false;
11320 
11321 		goto update;
11322 	}
11323 
11324 	dm_con_state = to_dm_connector_state(connector->state);
11325 
11326 	if (!adev->dm.freesync_module)
11327 		goto update;
11328 
11330 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11331 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11332 		bool edid_check_required = false;
11333 
11334 		if (edid) {
11335 			edid_check_required = is_dp_capable_without_timing_msa(
11336 						adev->dm.dc,
11337 						amdgpu_dm_connector);
11338 		}
11339 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
11342 			for (i = 0; i < 4; i++) {
11343 
11344 				timing	= &edid->detailed_timings[i];
11345 				data	= &timing->data.other_data;
11346 				range	= &data->data.range;
11347 				/*
11348 				 * Check if monitor has continuous frequency mode
11349 				 */
11350 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11351 					continue;
11352 				/*
11353 				 * Check for flag range limits only. If flag == 1 then
11354 				 * no additional timing information provided.
11355 				 * Default GTF, GTF Secondary curve and CVT are not
11356 				 * supported
11357 				 */
11358 				if (range->flags != 1)
11359 					continue;
11360 
11361 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11362 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11363 				amdgpu_dm_connector->pixel_clock_mhz =
11364 					range->pixel_clock_mhz * 10;
11365 
11366 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11367 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11368 
11369 				break;
11370 			}
11371 
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
11377 		}
11378 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11379 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11380 		if (i >= 0 && vsdb_info.freesync_supported) {
11381 			timing  = &edid->detailed_timings[i];
11382 			data    = &timing->data.other_data;
11383 
11384 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11385 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11386 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11387 				freesync_capable = true;
11388 
11389 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11390 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11391 		}
11392 	}
11393 
11394 update:
11395 	if (dm_con_state)
11396 		dm_con_state->freesync_capable = freesync_capable;
11397 
11398 	if (connector->vrr_capable_property)
11399 		drm_connector_set_vrr_capable_property(connector,
11400 						       freesync_capable);
11401 }
11402 
11403 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11404 {
11405 	struct amdgpu_device *adev = drm_to_adev(dev);
11406 	struct dc *dc = adev->dm.dc;
11407 	int i;
11408 
11409 	mutex_lock(&adev->dm.dc_lock);
11410 	if (dc->current_state) {
11411 		for (i = 0; i < dc->current_state->stream_count; ++i)
11412 			dc->current_state->streams[i]
11413 				->triggered_crtc_reset.enabled =
11414 				adev->dm.force_timing_sync;
11415 
11416 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11417 		dc_trigger_sync(dc, dc->current_state);
11418 	}
11419 	mutex_unlock(&adev->dm.dc_lock);
11420 }
11421 
11422 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11423 		       uint32_t value, const char *func_name)
11424 {
11425 #ifdef DM_CHECK_ADDR_0
11426 	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
11428 		return;
11429 	}
11430 #endif
11431 	cgs_write_register(ctx->cgs_device, address, value);
11432 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11433 }
11434 
11435 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11436 			  const char *func_name)
11437 {
11438 	uint32_t value;
11439 #ifdef DM_CHECK_ADDR_0
11440 	if (address == 0) {
11441 		DC_ERR("invalid register read; address = 0\n");
11442 		return 0;
11443 	}
11444 #endif
11445 
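	/*
	 * Register reads cannot be serviced while the DMUB register helper is
	 * gathering offloaded writes, unless it is bursting them out.
	 */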
11446 	if (ctx->dmub_srv &&
11447 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11448 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11449 		ASSERT(false);
11450 		return 0;
11451 	}
11452 
11453 	value = cgs_read_register(ctx->cgs_device, address);
11454 
11455 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11456 
11457 	return value;
11458 }
11459 
11460 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11461 	uint8_t status_type, uint32_t *operation_result)
11462 {
11463 	struct amdgpu_device *adev = ctx->driver_context;
11464 	int return_status = -1;
11465 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11466 
11467 	if (is_cmd_aux) {
11468 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11469 			return_status = p_notify->aux_reply.length;
11470 			*operation_result = p_notify->result;
11471 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11472 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11473 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11474 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11475 		} else {
11476 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11477 		}
11478 	} else {
11479 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11480 			return_status = 0;
11481 			*operation_result = p_notify->sc_status;
11482 		} else {
11483 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11484 		}
11485 	}
11486 
11487 	return return_status;
11488 }
11489 
11490 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11491 	unsigned int link_index, void *cmd_payload, void *operation_result)
11492 {
11493 	struct amdgpu_device *adev = ctx->driver_context;
11494 	int ret = 0;
11495 
11496 	if (is_cmd_aux) {
11497 		dc_process_dmub_aux_transfer_async(ctx->dc,
11498 			link_index, (struct aux_payload *)cmd_payload);
11499 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11500 					(struct set_config_cmd_payload *)cmd_payload,
11501 					adev->dm.dmub_notify)) {
11502 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11503 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11504 					(uint32_t *)operation_result);
11505 	}
11506 
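	/* Block until the DMUB outbox notification signals completion (10s timeout). */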
11507 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11508 	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timed out!\n");
11510 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11511 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11512 				(uint32_t *)operation_result);
11513 	}
11514 
11515 	if (is_cmd_aux) {
11516 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11517 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11518 
11519 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11520 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11521 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11522 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11523 				       adev->dm.dmub_notify->aux_reply.length);
11524 			}
11525 		}
11526 	}
11527 
11528 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11529 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11530 			(uint32_t *)operation_result);
11531 }
11532