1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/dp/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85 
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 #endif
96 
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100 
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 
118 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
119 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120 
121 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123 
124 /* Number of bytes in PSP header for firmware. */
125 #define PSP_HEADER_BYTES 0x100
126 
127 /* Number of bytes in PSP footer for firmware. */
128 #define PSP_FOOTER_BYTES 0x100
129 
130 /**
131  * DOC: overview
132  *
133  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135  * requests into DC requests, and DC responses into DRM responses.
136  *
137  * The root control structure is &struct amdgpu_display_manager.
138  */
139 
140 /* basic init/fini API */
141 static int amdgpu_dm_init(struct amdgpu_device *adev);
142 static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector);
144 
145 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146 {
147 	switch (link->dpcd_caps.dongle_type) {
148 	case DISPLAY_DONGLE_NONE:
149 		return DRM_MODE_SUBCONNECTOR_Native;
150 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
151 		return DRM_MODE_SUBCONNECTOR_VGA;
152 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
153 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
154 		return DRM_MODE_SUBCONNECTOR_DVID;
155 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
156 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
157 		return DRM_MODE_SUBCONNECTOR_HDMIA;
158 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159 	default:
160 		return DRM_MODE_SUBCONNECTOR_Unknown;
161 	}
162 }
163 
164 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165 {
166 	struct dc_link *link = aconnector->dc_link;
167 	struct drm_connector *connector = &aconnector->base;
168 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169 
170 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
171 		return;
172 
173 	if (aconnector->dc_sink)
174 		subconnector = get_subconnector_type(link);
175 
176 	drm_object_property_set_value(&connector->base,
177 			connector->dev->mode_config.dp_subconnector_property,
178 			subconnector);
179 }
180 
181 /*
 * Initializes drm_device display-related structures, based on the information
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
185  *
186  * Returns 0 on success
187  */
188 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* Removes and deallocates the DRM structures created by the above function. */
190 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191 
192 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193 				struct drm_plane *plane,
194 				unsigned long possible_crtcs,
195 				const struct dc_plane_cap *plane_cap);
196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197 			       struct drm_plane *plane,
198 			       uint32_t link_index);
199 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
201 				    uint32_t link_index,
202 				    struct amdgpu_encoder *amdgpu_encoder);
203 static int amdgpu_dm_encoder_init(struct drm_device *dev,
204 				  struct amdgpu_encoder *aencoder,
205 				  uint32_t link_index);
206 
207 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208 
209 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210 
211 static int amdgpu_dm_atomic_check(struct drm_device *dev,
212 				  struct drm_atomic_state *state);
213 
214 static void handle_cursor_update(struct drm_plane *plane,
215 				 struct drm_plane_state *old_plane_state);
216 
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 
220 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
221 static void handle_hpd_rx_irq(void *param);
222 
223 static bool
224 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
225 				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the vertical blank counter
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: Counter for vertical blanks, or 0 if the CRTC index is invalid or
 * no stream is set up for it.
 */
239 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
240 {
241 	if (crtc >= adev->mode_info.num_crtc)
242 		return 0;
243 	else {
244 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
245 
246 		if (acrtc->dm_irq_params.stream == NULL) {
247 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
248 				  crtc);
249 			return 0;
250 		}
251 
252 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
253 	}
254 }
255 
256 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
257 				  u32 *vbl, u32 *position)
258 {
259 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
260 
261 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262 		return -EINVAL;
263 	else {
264 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
265 
		if (acrtc->dm_irq_params.stream == NULL) {
267 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
268 				  crtc);
269 			return 0;
270 		}
271 
272 		/*
273 		 * TODO rework base driver to use values directly.
274 		 * for now parse it back into reg-format
275 		 */
276 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
277 					 &v_blank_start,
278 					 &v_blank_end,
279 					 &h_position,
280 					 &v_position);
281 
282 		*position = v_position | (h_position << 16);
283 		*vbl = v_blank_start | (v_blank_end << 16);
284 	}
285 
286 	return 0;
287 }
288 
289 static bool dm_is_idle(void *handle)
290 {
291 	/* XXX todo */
292 	return true;
293 }
294 
295 static int dm_wait_for_idle(void *handle)
296 {
297 	/* XXX todo */
298 	return 0;
299 }
300 
301 static bool dm_check_soft_reset(void *handle)
302 {
303 	return false;
304 }
305 
306 static int dm_soft_reset(void *handle)
307 {
308 	/* XXX todo */
309 	return 0;
310 }
311 
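/*
 * Look up the amdgpu_crtc driven by the given OTG instance. Falls back to
 * the first CRTC if the instance is unknown (-1) and returns NULL if no
 * CRTC matches.
 */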
312 static struct amdgpu_crtc *
313 get_crtc_by_otg_inst(struct amdgpu_device *adev,
314 		     int otg_inst)
315 {
316 	struct drm_device *dev = adev_to_drm(adev);
317 	struct drm_crtc *crtc;
318 	struct amdgpu_crtc *amdgpu_crtc;
319 
320 	if (WARN_ON(otg_inst == -1))
321 		return adev->mode_info.crtcs[0];
322 
323 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324 		amdgpu_crtc = to_amdgpu_crtc(crtc);
325 
326 		if (amdgpu_crtc->otg_inst == otg_inst)
327 			return amdgpu_crtc;
328 	}
329 
330 	return NULL;
331 }
332 
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334 {
335 	return acrtc->dm_irq_params.freesync_config.state ==
336 		       VRR_STATE_ACTIVE_VARIABLE ||
337 	       acrtc->dm_irq_params.freesync_config.state ==
338 		       VRR_STATE_ACTIVE_FIXED;
339 }
340 
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342 {
343 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345 }
346 
347 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
348 					      struct dm_crtc_state *new_state)
349 {
350 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
351 		return true;
352 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
353 		return true;
354 	else
355 		return false;
356 }
357 
358 /**
359  * dm_pflip_high_irq() - Handle pageflip interrupt
360  * @interrupt_params: ignored
361  *
362  * Handles the pageflip interrupt by notifying all interested parties
363  * that the pageflip has been completed.
364  */
365 static void dm_pflip_high_irq(void *interrupt_params)
366 {
367 	struct amdgpu_crtc *amdgpu_crtc;
368 	struct common_irq_params *irq_params = interrupt_params;
369 	struct amdgpu_device *adev = irq_params->adev;
370 	unsigned long flags;
371 	struct drm_pending_vblank_event *e;
372 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
373 	bool vrr_active;
374 
375 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376 
377 	/* IRQ could occur when in initial stage */
378 	/* TODO work and BO cleanup */
379 	if (amdgpu_crtc == NULL) {
380 		DC_LOG_PFLIP("CRTC is null, returning.\n");
381 		return;
382 	}
383 
384 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
388 						 amdgpu_crtc->pflip_status,
389 						 AMDGPU_FLIP_SUBMITTED,
390 						 amdgpu_crtc->crtc_id,
391 						 amdgpu_crtc);
392 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
393 		return;
394 	}
395 
396 	/* page flip completed. */
397 	e = amdgpu_crtc->event;
398 	amdgpu_crtc->event = NULL;
399 
400 	WARN_ON(!e);
401 
402 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
403 
404 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
405 	if (!vrr_active ||
406 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407 				      &v_blank_end, &hpos, &vpos) ||
408 	    (vpos < v_blank_start)) {
409 		/* Update to correct count and vblank timestamp if racing with
410 		 * vblank irq. This also updates to the correct vblank timestamp
411 		 * even in VRR mode, as scanout is past the front-porch atm.
412 		 */
413 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
414 
415 		/* Wake up userspace by sending the pageflip event with proper
416 		 * count and timestamp of vblank of flip completion.
417 		 */
418 		if (e) {
419 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
420 
421 			/* Event sent, so done with vblank for this flip */
422 			drm_crtc_vblank_put(&amdgpu_crtc->base);
423 		}
424 	} else if (e) {
425 		/* VRR active and inside front-porch: vblank count and
426 		 * timestamp for pageflip event will only be up to date after
427 		 * drm_crtc_handle_vblank() has been executed from late vblank
428 		 * irq handler after start of back-porch (vline 0). We queue the
429 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
430 		 * updated timestamp and count, once it runs after us.
431 		 *
432 		 * We need to open-code this instead of using the helper
433 		 * drm_crtc_arm_vblank_event(), as that helper would
434 		 * call drm_crtc_accurate_vblank_count(), which we must
435 		 * not call in VRR mode while we are in front-porch!
436 		 */
437 
438 		/* sequence will be replaced by real count during send-out. */
439 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440 		e->pipe = amdgpu_crtc->crtc_id;
441 
442 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
443 		e = NULL;
444 	}
445 
446 	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at start of this vblank
448 	 * of pageflip completion, so last_flip_vblank is the forbidden count
449 	 * for queueing new pageflips if vsync + VRR is enabled.
450 	 */
451 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
452 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
453 
454 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
456 
457 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
459 		     vrr_active, (int) !e);
460 }
461 
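/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Tracks the measured refresh rate and, in VRR mode, performs the core
 * vblank handling after the end of the front-porch, plus BTR processing
 * for pre-DCE12 ASICs.
 */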
462 static void dm_vupdate_high_irq(void *interrupt_params)
463 {
464 	struct common_irq_params *irq_params = interrupt_params;
465 	struct amdgpu_device *adev = irq_params->adev;
466 	struct amdgpu_crtc *acrtc;
467 	struct drm_device *drm_dev;
468 	struct drm_vblank_crtc *vblank;
469 	ktime_t frame_duration_ns, previous_timestamp;
470 	unsigned long flags;
471 	int vrr_active;
472 
473 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474 
475 	if (acrtc) {
476 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477 		drm_dev = acrtc->base.dev;
478 		vblank = &drm_dev->vblank[acrtc->base.index];
479 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480 		frame_duration_ns = vblank->time - previous_timestamp;
481 
482 		if (frame_duration_ns > 0) {
483 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
484 						frame_duration_ns,
485 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
487 		}
488 
489 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
490 			      acrtc->crtc_id,
491 			      vrr_active);
492 
		/* Core vblank handling is done here after end of front-porch in
		 * VRR mode, as vblank timestamping will only give valid results
		 * now that it is done after the front-porch. This will also deliver
496 		 * page-flip completion events that have been queued to us
497 		 * if a pageflip happened inside front-porch.
498 		 */
499 		if (vrr_active) {
500 			drm_crtc_handle_vblank(&acrtc->base);
501 
502 			/* BTR processing for pre-DCE12 ASICs */
503 			if (acrtc->dm_irq_params.stream &&
504 			    adev->family < AMDGPU_FAMILY_AI) {
505 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506 				mod_freesync_handle_v_update(
507 				    adev->dm.freesync_module,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params);
510 
511 				dc_stream_adjust_vmin_vmax(
512 				    adev->dm.dc,
513 				    acrtc->dm_irq_params.stream,
514 				    &acrtc->dm_irq_params.vrr_params.adjust);
515 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
516 			}
517 		}
518 	}
519 }
520 
521 /**
522  * dm_crtc_high_irq() - Handles CRTC interrupt
523  * @interrupt_params: used for determining the CRTC instance
524  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
526  * event handler.
527  */
528 static void dm_crtc_high_irq(void *interrupt_params)
529 {
530 	struct common_irq_params *irq_params = interrupt_params;
531 	struct amdgpu_device *adev = irq_params->adev;
532 	struct amdgpu_crtc *acrtc;
533 	unsigned long flags;
534 	int vrr_active;
535 
536 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
537 	if (!acrtc)
538 		return;
539 
540 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
541 
542 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543 		      vrr_active, acrtc->dm_irq_params.active_planes);
544 
	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-VRR mode, as only then vblank timestamping will give
	 * valid results while done in the front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
551 	if (!vrr_active)
552 		drm_crtc_handle_vblank(&acrtc->base);
553 
	/*
	 * The following must happen at start of vblank, for CRC
	 * computation and below-the-range (BTR) support in VRR mode.
	 */
558 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
559 
560 	/* BTR updates need to happen before VUPDATE on Vega and above. */
561 	if (adev->family < AMDGPU_FAMILY_AI)
562 		return;
563 
564 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
565 
566 	if (acrtc->dm_irq_params.stream &&
567 	    acrtc->dm_irq_params.vrr_params.supported &&
568 	    acrtc->dm_irq_params.freesync_config.state ==
569 		    VRR_STATE_ACTIVE_VARIABLE) {
570 		mod_freesync_handle_v_update(adev->dm.freesync_module,
571 					     acrtc->dm_irq_params.stream,
572 					     &acrtc->dm_irq_params.vrr_params);
573 
574 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575 					   &acrtc->dm_irq_params.vrr_params.adjust);
576 	}
577 
578 	/*
579 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
580 	 * In that case, pageflip completion interrupts won't fire and pageflip
581 	 * completion events won't get delivered. Prevent this by sending
582 	 * pending pageflip events from here if a flip is still pending.
583 	 *
584 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
585 	 * avoid race conditions between flip programming and completion,
586 	 * which could cause too early flip completion events.
587 	 */
588 	if (adev->family >= AMDGPU_FAMILY_RV &&
589 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590 	    acrtc->dm_irq_params.active_planes == 0) {
591 		if (acrtc->event) {
592 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
593 			acrtc->event = NULL;
594 			drm_crtc_vblank_put(&acrtc->base);
595 		}
596 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
597 	}
598 
599 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 }
601 
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
603 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
604 /**
605  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606  * DCN generation ASICs
607  * @interrupt_params: interrupt parameters
608  *
609  * Used to set crc window/read out crc value at vertical line 0 position
610  */
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
612 {
613 	struct common_irq_params *irq_params = interrupt_params;
614 	struct amdgpu_device *adev = irq_params->adev;
615 	struct amdgpu_crtc *acrtc;
616 
617 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618 
619 	if (!acrtc)
620 		return;
621 
622 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 }
624 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
625 
626 /**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
628  * @adev: amdgpu_device pointer
629  * @notify: dmub notification structure
630  *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
634  */
635 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
636 {
637 	if (adev->dm.dmub_notify)
638 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
639 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
640 		complete(&adev->dm.dmub_aux_transfer_done);
641 }
642 
643 /**
644  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
645  * @adev: amdgpu_device pointer
646  * @notify: dmub notification structure
647  *
 * Dmub HPD interrupt processing callback. Gets the display index through the
 * link index and calls the helper to do the processing.
650  */
651 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
652 {
653 	struct amdgpu_dm_connector *aconnector;
654 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
655 	struct drm_connector *connector;
656 	struct drm_connector_list_iter iter;
657 	struct dc_link *link;
658 	uint8_t link_index = 0;
659 	struct drm_device *dev = adev->dm.ddev;
660 
661 	if (adev == NULL)
662 		return;
663 
664 	if (notify == NULL) {
665 		DRM_ERROR("DMUB HPD callback notification was NULL");
666 		return;
667 	}
668 
	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
671 		return;
672 	}
673 
674 	link_index = notify->link_index;
675 	link = adev->dm.dc->links[link_index];
676 
677 	drm_connector_list_iter_begin(dev, &iter);
678 	drm_for_each_connector_iter(connector, &iter) {
679 		aconnector = to_amdgpu_dm_connector(connector);
680 		if (link && aconnector->dc_link == link) {
681 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
682 			hpd_aconnector = aconnector;
683 			break;
684 		}
685 	}
686 	drm_connector_list_iter_end(&iter);
687 
688 	if (hpd_aconnector) {
689 		if (notify->type == DMUB_NOTIFICATION_HPD)
690 			handle_hpd_irq_helper(hpd_aconnector);
691 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
692 			handle_hpd_rx_irq(hpd_aconnector);
693 	}
694 }
695 
696 /**
697  * register_dmub_notify_callback - Sets callback for DMUB notify
698  * @adev: amdgpu_device pointer
699  * @type: Type of dmub notification
700  * @callback: Dmub interrupt callback function
701  * @dmub_int_thread_offload: offload indicator
702  *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator whether the callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if the callback is NULL or
 * the notification type is invalid.
707  */
bool register_dmub_notify_callback(struct amdgpu_device *adev,
				   enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback,
				   bool dmub_int_thread_offload)
710 {
711 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
712 		adev->dm.dmub_callback[type] = callback;
713 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
714 	} else
715 		return false;
716 
717 	return true;
718 }
719 
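/*
 * Deferred worker for DMUB HPD notifications queued by the outbox IRQ
 * handler: dispatches the saved notification to the registered DMUB
 * callback and frees the work item.
 */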
720 static void dm_handle_hpd_work(struct work_struct *work)
721 {
722 	struct dmub_hpd_work *dmub_hpd_wrk;
723 
724 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
725 
726 	if (!dmub_hpd_wrk->dmub_notify) {
727 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
728 		return;
729 	}
730 
731 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
732 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
733 		dmub_hpd_wrk->dmub_notify);
734 	}
735 
736 	kfree(dmub_hpd_wrk->dmub_notify);
737 	kfree(dmub_hpd_wrk);
738 
739 }
740 
741 #define DMUB_TRACE_MAX_READ 64
742 /**
743  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
744  * @interrupt_params: used for determining the Outbox instance
745  *
 * Handles the Outbox interrupt by processing DMUB notifications and by
 * reading entries from the DMCUB trace buffer.
748  */
749 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
750 {
751 	struct dmub_notification notify;
752 	struct common_irq_params *irq_params = interrupt_params;
753 	struct amdgpu_device *adev = irq_params->adev;
754 	struct amdgpu_display_manager *dm = &adev->dm;
755 	struct dmcub_trace_buf_entry entry = { 0 };
756 	uint32_t count = 0;
757 	struct dmub_hpd_work *dmub_hpd_wrk;
758 	struct dc_link *plink = NULL;
759 
760 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
761 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
762 
763 		do {
764 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
766 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
767 				continue;
768 			}
769 			if (!dm->dmub_callback[notify.type]) {
770 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
771 				continue;
772 			}
			if (dm->dmub_thread_offload[notify.type]) {
774 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
775 				if (!dmub_hpd_wrk) {
776 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
777 					return;
778 				}
779 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
780 				if (!dmub_hpd_wrk->dmub_notify) {
781 					kfree(dmub_hpd_wrk);
782 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
783 					return;
784 				}
785 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
786 				if (dmub_hpd_wrk->dmub_notify)
787 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
788 				dmub_hpd_wrk->adev = adev;
789 				if (notify.type == DMUB_NOTIFICATION_HPD) {
790 					plink = adev->dm.dc->links[notify.link_index];
791 					if (plink) {
792 						plink->hpd_status =
793 							notify.hpd_status == DP_HPD_PLUG;
794 					}
795 				}
796 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
797 			} else {
798 				dm->dmub_callback[notify.type](adev, &notify);
799 			}
800 		} while (notify.pending_notification);
801 	}
802 
803 
804 	do {
805 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
806 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
807 							entry.param0, entry.param1);
808 
809 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
810 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
811 		} else
812 			break;
813 
814 		count++;
815 
816 	} while (count <= DMUB_TRACE_MAX_READ);
817 
818 	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
820 }
821 #endif /* CONFIG_DRM_AMD_DC_DCN */
822 
823 static int dm_set_clockgating_state(void *handle,
824 		  enum amd_clockgating_state state)
825 {
826 	return 0;
827 }
828 
829 static int dm_set_powergating_state(void *handle,
830 		  enum amd_powergating_state state)
831 {
832 	return 0;
833 }
834 
835 /* Prototypes of private functions */
static int dm_early_init(void *handle);
837 
838 /* Allocate memory for FBC compressed data  */
839 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
840 {
841 	struct drm_device *dev = connector->dev;
842 	struct amdgpu_device *adev = drm_to_adev(dev);
843 	struct dm_compressor_info *compressor = &adev->dm.compressor;
844 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
845 	struct drm_display_mode *mode;
846 	unsigned long max_size = 0;
847 
848 	if (adev->dm.dc->fbc_compressor == NULL)
849 		return;
850 
851 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
852 		return;
853 
854 	if (compressor->bo_ptr)
855 		return;
856 
858 	list_for_each_entry(mode, &connector->modes, head) {
859 		if (max_size < mode->htotal * mode->vtotal)
860 			max_size = mode->htotal * mode->vtotal;
861 	}
862 
863 	if (max_size) {
864 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
865 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
866 			    &compressor->gpu_addr, &compressor->cpu_addr);
867 
868 		if (r)
869 			DRM_ERROR("DM: Failed to initialize FBC\n");
870 		else {
871 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
872 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
873 		}
874 
875 	}
876 
877 }
878 
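/*
 * Audio component callback: look up the connector that owns the given audio
 * pin and copy its ELD (EDID-Like Data) into the caller's buffer.
 */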
879 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
880 					  int pipe, bool *enabled,
881 					  unsigned char *buf, int max_bytes)
882 {
883 	struct drm_device *dev = dev_get_drvdata(kdev);
884 	struct amdgpu_device *adev = drm_to_adev(dev);
885 	struct drm_connector *connector;
886 	struct drm_connector_list_iter conn_iter;
887 	struct amdgpu_dm_connector *aconnector;
888 	int ret = 0;
889 
890 	*enabled = false;
891 
892 	mutex_lock(&adev->dm.audio_lock);
893 
894 	drm_connector_list_iter_begin(dev, &conn_iter);
895 	drm_for_each_connector_iter(connector, &conn_iter) {
896 		aconnector = to_amdgpu_dm_connector(connector);
897 		if (aconnector->audio_inst != port)
898 			continue;
899 
900 		*enabled = true;
901 		ret = drm_eld_size(connector->eld);
902 		memcpy(buf, connector->eld, min(max_bytes, ret));
903 
904 		break;
905 	}
906 	drm_connector_list_iter_end(&conn_iter);
907 
908 	mutex_unlock(&adev->dm.audio_lock);
909 
910 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
911 
912 	return ret;
913 }
914 
915 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
916 	.get_eld = amdgpu_dm_audio_component_get_eld,
917 };
918 
919 static int amdgpu_dm_audio_component_bind(struct device *kdev,
920 				       struct device *hda_kdev, void *data)
921 {
922 	struct drm_device *dev = dev_get_drvdata(kdev);
923 	struct amdgpu_device *adev = drm_to_adev(dev);
924 	struct drm_audio_component *acomp = data;
925 
926 	acomp->ops = &amdgpu_dm_audio_component_ops;
927 	acomp->dev = kdev;
928 	adev->dm.audio_component = acomp;
929 
930 	return 0;
931 }
932 
933 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
934 					  struct device *hda_kdev, void *data)
935 {
936 	struct drm_device *dev = dev_get_drvdata(kdev);
937 	struct amdgpu_device *adev = drm_to_adev(dev);
938 	struct drm_audio_component *acomp = data;
939 
940 	acomp->ops = NULL;
941 	acomp->dev = NULL;
942 	adev->dm.audio_component = NULL;
943 }
944 
945 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
946 	.bind	= amdgpu_dm_audio_component_bind,
947 	.unbind	= amdgpu_dm_audio_component_unbind,
948 };
949 
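/*
 * Register the DM audio pins with the generic DRM audio component framework
 * so the audio driver can retrieve ELDs from the display side.
 */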
950 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
951 {
952 	int i, ret;
953 
954 	if (!amdgpu_audio)
955 		return 0;
956 
957 	adev->mode_info.audio.enabled = true;
958 
959 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
960 
961 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
962 		adev->mode_info.audio.pin[i].channels = -1;
963 		adev->mode_info.audio.pin[i].rate = -1;
964 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
965 		adev->mode_info.audio.pin[i].status_bits = 0;
966 		adev->mode_info.audio.pin[i].category_code = 0;
967 		adev->mode_info.audio.pin[i].connected = false;
968 		adev->mode_info.audio.pin[i].id =
969 			adev->dm.dc->res_pool->audios[i]->inst;
970 		adev->mode_info.audio.pin[i].offset = 0;
971 	}
972 
973 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
974 	if (ret < 0)
975 		return ret;
976 
977 	adev->dm.audio_registered = true;
978 
979 	return 0;
980 }
981 
982 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
983 {
984 	if (!amdgpu_audio)
985 		return;
986 
987 	if (!adev->mode_info.audio.enabled)
988 		return;
989 
990 	if (adev->dm.audio_registered) {
991 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
992 		adev->dm.audio_registered = false;
993 	}
994 
995 	/* TODO: Disable audio? */
996 
997 	adev->mode_info.audio.enabled = false;
998 }
999 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1001 {
1002 	struct drm_audio_component *acomp = adev->dm.audio_component;
1003 
1004 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1005 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1006 
1007 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1008 						 pin, -1);
1009 	}
1010 }
1011 
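/*
 * Copy the DMUB firmware and VBIOS into the framebuffer windows, program the
 * DMUB hardware parameters and bring up the DMUB service. Returns 0 when
 * DMUB is not supported on the ASIC.
 */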
1012 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1013 {
1014 	const struct dmcub_firmware_header_v1_0 *hdr;
1015 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1016 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1017 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1018 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1019 	struct abm *abm = adev->dm.dc->res_pool->abm;
1020 	struct dmub_srv_hw_params hw_params;
1021 	enum dmub_status status;
1022 	const unsigned char *fw_inst_const, *fw_bss_data;
1023 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1024 	bool has_hw_support;
1025 	struct dc *dc = adev->dm.dc;
1026 
1027 	if (!dmub_srv)
1028 		/* DMUB isn't supported on the ASIC. */
1029 		return 0;
1030 
1031 	if (!fb_info) {
1032 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1033 		return -EINVAL;
1034 	}
1035 
1036 	if (!dmub_fw) {
1037 		/* Firmware required for DMUB support. */
1038 		DRM_ERROR("No firmware provided for DMUB.\n");
1039 		return -EINVAL;
1040 	}
1041 
1042 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1043 	if (status != DMUB_STATUS_OK) {
1044 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1045 		return -EINVAL;
1046 	}
1047 
1048 	if (!has_hw_support) {
1049 		DRM_INFO("DMUB unsupported on ASIC\n");
1050 		return 0;
1051 	}
1052 
1053 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1054 
1055 	fw_inst_const = dmub_fw->data +
1056 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1057 			PSP_HEADER_BYTES;
1058 
1059 	fw_bss_data = dmub_fw->data +
1060 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1061 		      le32_to_cpu(hdr->inst_const_bytes);
1062 
1063 	/* Copy firmware and bios info into FB memory. */
1064 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1065 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1066 
1067 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1068 
	/*
	 * If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init.
	 */
1074 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1075 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1076 				fw_inst_const_size);
1077 	}
1078 
1079 	if (fw_bss_data_size)
1080 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1081 		       fw_bss_data, fw_bss_data_size);
1082 
1083 	/* Copy firmware bios info into FB memory. */
1084 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1085 	       adev->bios_size);
1086 
1087 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1090 
1091 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1092 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1093 
1094 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1095 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1096 
1097 	/* Initialize hardware. */
1098 	memset(&hw_params, 0, sizeof(hw_params));
1099 	hw_params.fb_base = adev->gmc.fb_start;
1100 	hw_params.fb_offset = adev->gmc.aper_base;
1101 
1102 	/* backdoor load firmware and trigger dmub running */
1103 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1104 		hw_params.load_inst_const = true;
1105 
1106 	if (dmcu)
1107 		hw_params.psp_version = dmcu->psp_version;
1108 
1109 	for (i = 0; i < fb_info->num_fb; ++i)
1110 		hw_params.fb[i] = &fb_info->fb[i];
1111 
1112 	switch (adev->asic_type) {
1113 	case CHIP_YELLOW_CARP:
1114 		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1115 			hw_params.dpia_supported = true;
1116 #if defined(CONFIG_DRM_AMD_DC_DCN)
1117 			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1118 #endif
1119 		}
1120 		break;
1121 	default:
1122 		break;
1123 	}
1124 
1125 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1126 	if (status != DMUB_STATUS_OK) {
1127 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1128 		return -EINVAL;
1129 	}
1130 
1131 	/* Wait for firmware load to finish. */
1132 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1133 	if (status != DMUB_STATUS_OK)
1134 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1135 
1136 	/* Init DMCU and ABM if available. */
1137 	if (dmcu && abm) {
1138 		dmcu->funcs->dmcu_init(dmcu);
1139 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1140 	}
1141 
1142 	if (!adev->dm.dc->ctx->dmub_srv)
1143 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1144 	if (!adev->dm.dc->ctx->dmub_srv) {
1145 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1146 		return -ENOMEM;
1147 	}
1148 
1149 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1150 		 adev->dm.dmcub_fw_version);
1151 
1152 	return 0;
1153 }
1154 
1155 #if defined(CONFIG_DRM_AMD_DC_DCN)
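/*
 * Build the DC physical address space configuration (system aperture, AGP
 * window and GART page table location) from the GMC state, for use by
 * dc_setup_system_context() on DCN ASICs.
 */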
1156 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1157 {
1158 	uint64_t pt_base;
1159 	uint32_t logical_addr_low;
1160 	uint32_t logical_addr_high;
1161 	uint32_t agp_base, agp_bot, agp_top;
1162 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1163 
1164 	memset(pa_config, 0, sizeof(*pa_config));
1165 
1166 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1167 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1168 
1169 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (by 1) to get rid of the VM fault and hardware hang.
		 */
1176 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1177 	else
1178 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1179 
1180 	agp_base = 0;
1181 	agp_bot = adev->gmc.agp_start >> 24;
1182 	agp_top = adev->gmc.agp_end >> 24;
1183 
1185 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1186 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1187 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1188 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1189 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1190 	page_table_base.low_part = lower_32_bits(pt_base);
1191 
1192 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1193 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1194 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1196 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1197 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1198 
1199 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1200 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1201 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1202 
1203 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1204 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1205 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1206 
1207 	pa_config->is_hvm_enabled = 0;
1208 
1209 }
1210 #endif
1211 #if defined(CONFIG_DRM_AMD_DC_DCN)
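/*
 * Deferred worker that tracks the number of CRTCs with vblank interrupts
 * enabled, toggles DC idle optimizations (MALL) accordingly, and enables or
 * disables PSR based on the vblank requirements from the OS.
 */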
1212 static void vblank_control_worker(struct work_struct *work)
1213 {
1214 	struct vblank_control_work *vblank_work =
1215 		container_of(work, struct vblank_control_work, work);
1216 	struct amdgpu_display_manager *dm = vblank_work->dm;
1217 
1218 	mutex_lock(&dm->dc_lock);
1219 
1220 	if (vblank_work->enable)
1221 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1223 		dm->active_vblank_irq_count--;
1224 
1225 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1226 
1227 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1228 
1229 	/* Control PSR based on vblank requirements from OS */
1230 	if (vblank_work->stream && vblank_work->stream->link) {
1231 		if (vblank_work->enable) {
1232 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1233 				amdgpu_dm_psr_disable(vblank_work->stream);
1234 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1235 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1236 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1237 			amdgpu_dm_psr_enable(vblank_work->stream);
1238 		}
1239 	}
1240 
1241 	mutex_unlock(&dm->dc_lock);
1242 
1243 	dc_stream_release(vblank_work->stream);
1244 
1245 	kfree(vblank_work);
1246 }
1247 
1248 #endif
1249 
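/*
 * Deferred worker for HPD RX interrupts that were offloaded from the IRQ
 * handler: re-detects the sink and handles automated test requests or link
 * loss outside of interrupt context.
 */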
1250 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1251 {
1252 	struct hpd_rx_irq_offload_work *offload_work;
1253 	struct amdgpu_dm_connector *aconnector;
1254 	struct dc_link *dc_link;
1255 	struct amdgpu_device *adev;
1256 	enum dc_connection_type new_connection_type = dc_connection_none;
1257 	unsigned long flags;
1258 
1259 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1260 	aconnector = offload_work->offload_wq->aconnector;
1261 
1262 	if (!aconnector) {
1263 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1264 		goto skip;
1265 	}
1266 
1267 	adev = drm_to_adev(aconnector->base.dev);
1268 	dc_link = aconnector->dc_link;
1269 
1270 	mutex_lock(&aconnector->hpd_lock);
1271 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1272 		DRM_ERROR("KMS: Failed to detect connector\n");
1273 	mutex_unlock(&aconnector->hpd_lock);
1274 
1275 	if (new_connection_type == dc_connection_none)
1276 		goto skip;
1277 
1278 	if (amdgpu_in_reset(adev))
1279 		goto skip;
1280 
1281 	mutex_lock(&adev->dm.dc_lock);
1282 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1283 		dc_link_dp_handle_automated_test(dc_link);
1284 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1285 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1286 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1287 		dc_link_dp_handle_link_loss(dc_link);
1288 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1289 		offload_work->offload_wq->is_handling_link_loss = false;
1290 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1291 	}
1292 	mutex_unlock(&adev->dm.dc_lock);
1293 
1294 skip:
1295 	kfree(offload_work);
1296 
1297 }
1298 
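/*
 * Allocate one single-threaded offload workqueue per link for deferred
 * HPD RX interrupt handling.
 */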
1299 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1300 {
1301 	int max_caps = dc->caps.max_links;
1302 	int i = 0;
1303 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1304 
1305 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1306 
1307 	if (!hpd_rx_offload_wq)
1308 		return NULL;
1309 
1310 
1311 	for (i = 0; i < max_caps; i++) {
1312 		hpd_rx_offload_wq[i].wq =
1313 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1314 
1315 		if (hpd_rx_offload_wq[i].wq == NULL) {
1316 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1317 			return NULL;
1318 		}
1319 
1320 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1321 	}
1322 
1323 	return hpd_rx_offload_wq;
1324 }
1325 
1326 struct amdgpu_stutter_quirk {
1327 	u16 chip_vendor;
1328 	u16 chip_device;
1329 	u16 subsys_vendor;
1330 	u16 subsys_device;
1331 	u8 revision;
1332 };
1333 
1334 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1335 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1336 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1337 	{ 0, 0, 0, 0, 0 },
1338 };
1339 
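/*
 * Check whether the PCI device matches an entry in the stutter quirk list,
 * in which case memory stutter mode must be disabled.
 */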
1340 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1341 {
1342 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1343 
1344 	while (p && p->chip_device != 0) {
1345 		if (pdev->vendor == p->chip_vendor &&
1346 		    pdev->device == p->chip_device &&
1347 		    pdev->subsystem_vendor == p->subsys_vendor &&
1348 		    pdev->subsystem_device == p->subsys_device &&
1349 		    pdev->revision == p->revision) {
1350 			return true;
1351 		}
1352 		++p;
1353 	}
1354 	return false;
1355 }
1356 
1357 static int amdgpu_dm_init(struct amdgpu_device *adev)
1358 {
1359 	struct dc_init_data init_data;
1360 #ifdef CONFIG_DRM_AMD_DC_HDCP
1361 	struct dc_callback_init init_params;
1362 #endif
1363 	int r;
1364 
1365 	adev->dm.ddev = adev_to_drm(adev);
1366 	adev->dm.adev = adev;
1367 
1368 	/* Zero all the fields */
1369 	memset(&init_data, 0, sizeof(init_data));
1370 #ifdef CONFIG_DRM_AMD_DC_HDCP
1371 	memset(&init_params, 0, sizeof(init_params));
1372 #endif
1373 
1374 	mutex_init(&adev->dm.dc_lock);
1375 	mutex_init(&adev->dm.audio_lock);
1376 #if defined(CONFIG_DRM_AMD_DC_DCN)
1377 	spin_lock_init(&adev->dm.vblank_lock);
1378 #endif
1379 
	if (amdgpu_dm_irq_init(adev)) {
1381 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1382 		goto error;
1383 	}
1384 
1385 	init_data.asic_id.chip_family = adev->family;
1386 
1387 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1388 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1389 	init_data.asic_id.chip_id = adev->pdev->device;
1390 
1391 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1392 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1393 	init_data.asic_id.atombios_base_address =
1394 		adev->mode_info.atom_context->bios;
1395 
1396 	init_data.driver = adev;
1397 
1398 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1399 
1400 	if (!adev->dm.cgs_device) {
1401 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1402 		goto error;
1403 	}
1404 
1405 	init_data.cgs_device = adev->dm.cgs_device;
1406 
1407 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1408 
1409 	switch (adev->asic_type) {
1410 	case CHIP_CARRIZO:
1411 	case CHIP_STONEY:
1412 		init_data.flags.gpu_vm_support = true;
1413 		break;
1414 	default:
1415 		switch (adev->ip_versions[DCE_HWIP][0]) {
1416 		case IP_VERSION(2, 1, 0):
1417 			init_data.flags.gpu_vm_support = true;
1418 			switch (adev->dm.dmcub_fw_version) {
1419 			case 0: /* development */
1420 			case 0x1: /* linux-firmware.git hash 6d9f399 */
1421 			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1422 				init_data.flags.disable_dmcu = false;
1423 				break;
1424 			default:
1425 				init_data.flags.disable_dmcu = true;
1426 			}
1427 			break;
1428 		case IP_VERSION(1, 0, 0):
1429 		case IP_VERSION(1, 0, 1):
1430 		case IP_VERSION(3, 0, 1):
1431 		case IP_VERSION(3, 1, 2):
1432 		case IP_VERSION(3, 1, 3):
1433 			init_data.flags.gpu_vm_support = true;
1434 			break;
1435 		case IP_VERSION(2, 0, 3):
1436 			init_data.flags.disable_dmcu = true;
1437 			break;
1438 		default:
1439 			break;
1440 		}
1441 		break;
1442 	}
1443 
1444 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1445 		init_data.flags.fbc_support = true;
1446 
1447 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1448 		init_data.flags.multi_mon_pp_mclk_switch = true;
1449 
1450 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1451 		init_data.flags.disable_fractional_pwm = true;
1452 
1453 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1454 		init_data.flags.edp_no_power_sequencing = true;
1455 
1456 	init_data.flags.power_down_display_on_boot = true;
1457 
1458 	if (check_seamless_boot_capability(adev)) {
1459 		init_data.flags.power_down_display_on_boot = false;
1460 		init_data.flags.allow_seamless_boot_optimization = true;
1461 		DRM_INFO("Seamless boot condition check passed\n");
1462 	}
1463 
1464 	INIT_LIST_HEAD(&adev->dm.da_list);
1465 	/* Display Core create. */
1466 	adev->dm.dc = dc_create(&init_data);
1467 
1468 	if (adev->dm.dc) {
1469 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1470 	} else {
1471 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1472 		goto error;
1473 	}
1474 
1475 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1476 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1477 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1478 	}
1479 
1480 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1482 	if (dm_should_disable_stutter(adev->pdev))
1483 		adev->dm.dc->debug.disable_stutter = true;
1484 
1485 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1486 		adev->dm.dc->debug.disable_stutter = true;
1487 
1488 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1489 		adev->dm.dc->debug.disable_dsc = true;
1490 		adev->dm.dc->debug.disable_dsc_edp = true;
1491 	}
1492 
1493 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1494 		adev->dm.dc->debug.disable_clock_gate = true;
1495 
1496 	r = dm_dmub_hw_init(adev);
1497 	if (r) {
1498 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1499 		goto error;
1500 	}
1501 
1502 	dc_hardware_init(adev->dm.dc);
1503 
1504 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1505 	if (!adev->dm.hpd_rx_offload_wq) {
1506 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1507 		goto error;
1508 	}
1509 
1510 #if defined(CONFIG_DRM_AMD_DC_DCN)
1511 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1512 		struct dc_phy_addr_space_config pa_config;
1513 
1514 		mmhub_read_system_context(adev, &pa_config);
1515 
		/* Call the DC init_memory func */
1517 		dc_setup_system_context(adev->dm.dc, &pa_config);
1518 	}
1519 #endif
1520 
1521 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
1528 
1529 	amdgpu_dm_init_color_mod();
1530 
1531 #if defined(CONFIG_DRM_AMD_DC_DCN)
1532 	if (adev->dm.dc->caps.max_links > 0) {
1533 		adev->dm.vblank_control_workqueue =
1534 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1535 		if (!adev->dm.vblank_control_workqueue)
1536 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1537 	}
1538 #endif
1539 
1540 #ifdef CONFIG_DRM_AMD_DC_HDCP
1541 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1542 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1543 
1544 		if (!adev->dm.hdcp_workqueue)
1545 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1546 		else
1547 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1548 
1549 		dc_init_callbacks(adev->dm.dc, &init_params);
1550 	}
1551 #endif
1552 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1553 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1554 #endif
1555 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1556 		init_completion(&adev->dm.dmub_aux_transfer_done);
1557 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1558 		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1560 			goto error;
1561 		}
1562 
1563 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1564 		if (!adev->dm.delayed_hpd_wq) {
1565 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1566 			goto error;
1567 		}
1568 
1569 		amdgpu_dm_outbox_init(adev);
1570 #if defined(CONFIG_DRM_AMD_DC_DCN)
1571 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1572 			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1574 			goto error;
1575 		}
1576 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1578 			goto error;
1579 		}
1580 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback");
1582 			goto error;
1583 		}
1584 #endif /* CONFIG_DRM_AMD_DC_DCN */
1585 	}
1586 
1587 	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1590 		goto error;
1591 	}
1592 
1593 	/* create fake encoders for MST */
1594 	dm_dp_create_fake_mst_encoders(adev);
1595 
1596 	/* TODO: Add_display_info? */
1597 
1598 	/* TODO use dynamic cursor width */
1599 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1600 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1601 
1602 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1605 		goto error;
1606 	}
1607 
1609 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1610 
1611 	return 0;
1612 error:
1613 	amdgpu_dm_fini(adev);
1614 
1615 	return -EINVAL;
1616 }
1617 
1618 static int amdgpu_dm_early_fini(void *handle)
1619 {
1620 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1621 
1622 	amdgpu_dm_audio_fini(adev);
1623 
1624 	return 0;
1625 }
1626 
1627 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1628 {
1629 	int i;
1630 
1631 #if defined(CONFIG_DRM_AMD_DC_DCN)
1632 	if (adev->dm.vblank_control_workqueue) {
1633 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1634 		adev->dm.vblank_control_workqueue = NULL;
1635 	}
1636 #endif
1637 
1638 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1639 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1640 	}
1641 
1642 	amdgpu_dm_destroy_drm_device(&adev->dm);
1643 
1644 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1645 	if (adev->dm.crc_rd_wrk) {
1646 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1647 		kfree(adev->dm.crc_rd_wrk);
1648 		adev->dm.crc_rd_wrk = NULL;
1649 	}
1650 #endif
1651 #ifdef CONFIG_DRM_AMD_DC_HDCP
1652 	if (adev->dm.hdcp_workqueue) {
1653 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1654 		adev->dm.hdcp_workqueue = NULL;
1655 	}
1656 
1657 	if (adev->dm.dc)
1658 		dc_deinit_callbacks(adev->dm.dc);
1659 #endif
1660 
1661 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1662 
1663 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1664 		kfree(adev->dm.dmub_notify);
1665 		adev->dm.dmub_notify = NULL;
1666 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1667 		adev->dm.delayed_hpd_wq = NULL;
1668 	}
1669 
1670 	if (adev->dm.dmub_bo)
1671 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1672 				      &adev->dm.dmub_bo_gpu_addr,
1673 				      &adev->dm.dmub_bo_cpu_addr);
1674 
1675 	if (adev->dm.hpd_rx_offload_wq) {
1676 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1677 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1678 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1679 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1680 			}
1681 		}
1682 
1683 		kfree(adev->dm.hpd_rx_offload_wq);
1684 		adev->dm.hpd_rx_offload_wq = NULL;
1685 	}
1686 
1687 	/* DC Destroy TODO: Replace destroy DAL */
1688 	if (adev->dm.dc)
1689 		dc_destroy(&adev->dm.dc);
1690 	/*
	 * TODO: pageflip, vblank interrupt
1692 	 *
1693 	 * amdgpu_dm_irq_fini(adev);
1694 	 */
1695 
1696 	if (adev->dm.cgs_device) {
1697 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1698 		adev->dm.cgs_device = NULL;
1699 	}
1700 	if (adev->dm.freesync_module) {
1701 		mod_freesync_destroy(adev->dm.freesync_module);
1702 		adev->dm.freesync_module = NULL;
1703 	}
1704 
1705 	mutex_destroy(&adev->dm.audio_lock);
1706 	mutex_destroy(&adev->dm.dc_lock);
1707 
1708 	return;
1709 }
1710 
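/*
 * Request and validate DMCU firmware for ASICs that use a separate DMCU, and
 * register its ERAM and INTV sections for loading through PSP. ASICs without
 * a standalone DMCU simply return 0 here.
 */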
1711 static int load_dmcu_fw(struct amdgpu_device *adev)
1712 {
1713 	const char *fw_name_dmcu = NULL;
1714 	int r;
1715 	const struct dmcu_firmware_header_v1_0 *hdr;
1716 
	switch (adev->asic_type) {
1718 #if defined(CONFIG_DRM_AMD_DC_SI)
1719 	case CHIP_TAHITI:
1720 	case CHIP_PITCAIRN:
1721 	case CHIP_VERDE:
1722 	case CHIP_OLAND:
1723 #endif
1724 	case CHIP_BONAIRE:
1725 	case CHIP_HAWAII:
1726 	case CHIP_KAVERI:
1727 	case CHIP_KABINI:
1728 	case CHIP_MULLINS:
1729 	case CHIP_TONGA:
1730 	case CHIP_FIJI:
1731 	case CHIP_CARRIZO:
1732 	case CHIP_STONEY:
1733 	case CHIP_POLARIS11:
1734 	case CHIP_POLARIS10:
1735 	case CHIP_POLARIS12:
1736 	case CHIP_VEGAM:
1737 	case CHIP_VEGA10:
1738 	case CHIP_VEGA12:
1739 	case CHIP_VEGA20:
1740 		return 0;
1741 	case CHIP_NAVI12:
1742 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1743 		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
1752 	default:
1753 		switch (adev->ip_versions[DCE_HWIP][0]) {
1754 		case IP_VERSION(2, 0, 2):
1755 		case IP_VERSION(2, 0, 3):
1756 		case IP_VERSION(2, 0, 0):
1757 		case IP_VERSION(2, 1, 0):
1758 		case IP_VERSION(3, 0, 0):
1759 		case IP_VERSION(3, 0, 2):
1760 		case IP_VERSION(3, 0, 3):
1761 		case IP_VERSION(3, 0, 1):
1762 		case IP_VERSION(3, 1, 2):
1763 		case IP_VERSION(3, 1, 3):
1764 			return 0;
1765 		default:
1766 			break;
1767 		}
1768 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1769 		return -EINVAL;
1770 	}
1771 
1772 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1773 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1774 		return 0;
1775 	}
1776 
1777 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1778 	if (r == -ENOENT) {
1779 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1780 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1781 		adev->dm.fw_dmcu = NULL;
1782 		return 0;
1783 	}
1784 	if (r) {
1785 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1786 			fw_name_dmcu);
1787 		return r;
1788 	}
1789 
1790 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1791 	if (r) {
1792 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1793 			fw_name_dmcu);
1794 		release_firmware(adev->dm.fw_dmcu);
1795 		adev->dm.fw_dmcu = NULL;
1796 		return r;
1797 	}
1798 
1799 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1800 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1801 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1802 	adev->firmware.fw_size +=
1803 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1804 
1805 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1806 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1807 	adev->firmware.fw_size +=
1808 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1809 
1810 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1811 
1812 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1813 
1814 	return 0;
1815 }
1816 
1817 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1818 {
1819 	struct amdgpu_device *adev = ctx;
1820 
1821 	return dm_read_reg(adev->dm.dc->ctx, address);
1822 }
1823 
1824 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1825 				     uint32_t value)
1826 {
1827 	struct amdgpu_device *adev = ctx;
1828 
1829 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1830 }
1831 
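/*
 * Software-side DMUB setup: select the per-ASIC DMUB firmware, create the
 * dmub_srv instance, compute the region layout from the firmware header and
 * back it with a VRAM buffer object for later hardware init.
 */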
1832 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1833 {
1834 	struct dmub_srv_create_params create_params;
1835 	struct dmub_srv_region_params region_params;
1836 	struct dmub_srv_region_info region_info;
1837 	struct dmub_srv_fb_params fb_params;
1838 	struct dmub_srv_fb_info *fb_info;
1839 	struct dmub_srv *dmub_srv;
1840 	const struct dmcub_firmware_header_v1_0 *hdr;
1841 	const char *fw_name_dmub;
1842 	enum dmub_asic dmub_asic;
1843 	enum dmub_status status;
1844 	int r;
1845 
1846 	switch (adev->ip_versions[DCE_HWIP][0]) {
1847 	case IP_VERSION(2, 1, 0):
1848 		dmub_asic = DMUB_ASIC_DCN21;
1849 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1850 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1851 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1852 		break;
1853 	case IP_VERSION(3, 0, 0):
1854 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1855 			dmub_asic = DMUB_ASIC_DCN30;
1856 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1857 		} else {
1858 			dmub_asic = DMUB_ASIC_DCN30;
1859 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1860 		}
1861 		break;
1862 	case IP_VERSION(3, 0, 1):
1863 		dmub_asic = DMUB_ASIC_DCN301;
1864 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1865 		break;
1866 	case IP_VERSION(3, 0, 2):
1867 		dmub_asic = DMUB_ASIC_DCN302;
1868 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1869 		break;
1870 	case IP_VERSION(3, 0, 3):
1871 		dmub_asic = DMUB_ASIC_DCN303;
1872 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1873 		break;
1874 	case IP_VERSION(3, 1, 2):
1875 	case IP_VERSION(3, 1, 3):
1876 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1877 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1878 		break;
1879 
1880 	default:
1881 		/* ASIC doesn't support DMUB. */
1882 		return 0;
1883 	}
1884 
1885 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1886 	if (r) {
1887 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1888 		return 0;
1889 	}
1890 
1891 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1892 	if (r) {
1893 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1894 		return 0;
1895 	}
1896 
1897 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1898 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1899 
1900 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1901 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1902 			AMDGPU_UCODE_ID_DMCUB;
1903 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1904 			adev->dm.dmub_fw;
1905 		adev->firmware.fw_size +=
1906 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1907 
1908 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1909 			 adev->dm.dmcub_fw_version);
1910 	}
1911 
1912 
1913 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1914 	dmub_srv = adev->dm.dmub_srv;
1915 
1916 	if (!dmub_srv) {
1917 		DRM_ERROR("Failed to allocate DMUB service!\n");
1918 		return -ENOMEM;
1919 	}
1920 
1921 	memset(&create_params, 0, sizeof(create_params));
1922 	create_params.user_ctx = adev;
1923 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1924 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1925 	create_params.asic = dmub_asic;
1926 
1927 	/* Create the DMUB service. */
1928 	status = dmub_srv_create(dmub_srv, &create_params);
1929 	if (status != DMUB_STATUS_OK) {
1930 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1931 		return -EINVAL;
1932 	}
1933 
1934 	/* Calculate the size of all the regions for the DMUB service. */
1935 	memset(&region_params, 0, sizeof(region_params));
1936 
1937 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1938 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1939 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1940 	region_params.vbios_size = adev->bios_size;
1941 	region_params.fw_bss_data = region_params.bss_data_size ?
1942 		adev->dm.dmub_fw->data +
1943 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1944 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1945 	region_params.fw_inst_const =
1946 		adev->dm.dmub_fw->data +
1947 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1948 		PSP_HEADER_BYTES;
1949 
1950 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1951 					   &region_info);
1952 
1953 	if (status != DMUB_STATUS_OK) {
1954 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1955 		return -EINVAL;
1956 	}
1957 
1958 	/*
1959 	 * Allocate a framebuffer based on the total size of all the regions.
1960 	 * TODO: Move this into GART.
1961 	 */
1962 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1963 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1964 				    &adev->dm.dmub_bo_gpu_addr,
1965 				    &adev->dm.dmub_bo_cpu_addr);
1966 	if (r)
1967 		return r;
1968 
1969 	/* Rebase the regions on the framebuffer address. */
1970 	memset(&fb_params, 0, sizeof(fb_params));
1971 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1972 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1973 	fb_params.region_info = &region_info;
1974 
1975 	adev->dm.dmub_fb_info =
1976 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1977 	fb_info = adev->dm.dmub_fb_info;
1978 
1979 	if (!fb_info) {
1980 		DRM_ERROR(
1981 			"Failed to allocate framebuffer info for DMUB service!\n");
1982 		return -ENOMEM;
1983 	}
1984 
1985 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1986 	if (status != DMUB_STATUS_OK) {
1987 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1988 		return -EINVAL;
1989 	}
1990 
1991 	return 0;
1992 }
1993 
1994 static int dm_sw_init(void *handle)
1995 {
1996 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1997 	int r;
1998 
1999 	r = dm_dmub_sw_init(adev);
2000 	if (r)
2001 		return r;
2002 
2003 	return load_dmcu_fw(adev);
2004 }
2005 
2006 static int dm_sw_fini(void *handle)
2007 {
2008 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2009 
2010 	kfree(adev->dm.dmub_fb_info);
2011 	adev->dm.dmub_fb_info = NULL;
2012 
2013 	if (adev->dm.dmub_srv) {
2014 		dmub_srv_destroy(adev->dm.dmub_srv);
2015 		adev->dm.dmub_srv = NULL;
2016 	}
2017 
2018 	release_firmware(adev->dm.dmub_fw);
2019 	adev->dm.dmub_fw = NULL;
2020 
2021 	release_firmware(adev->dm.fw_dmcu);
2022 	adev->dm.fw_dmcu = NULL;
2023 
2024 	return 0;
2025 }
2026 
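/* Start MST topology management on every connector backed by an MST branch. */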
2027 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2028 {
2029 	struct amdgpu_dm_connector *aconnector;
2030 	struct drm_connector *connector;
2031 	struct drm_connector_list_iter iter;
2032 	int ret = 0;
2033 
2034 	drm_connector_list_iter_begin(dev, &iter);
2035 	drm_for_each_connector_iter(connector, &iter) {
2036 		aconnector = to_amdgpu_dm_connector(connector);
2037 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2038 		    aconnector->mst_mgr.aux) {
2039 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2040 					 aconnector,
2041 					 aconnector->base.base.id);
2042 
2043 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2044 			if (ret < 0) {
2045 				DRM_ERROR("DM_MST: Failed to start MST\n");
2046 				aconnector->dc_link->type =
2047 					dc_connection_single;
2048 				break;
2049 			}
2050 		}
2051 	}
2052 	drm_connector_list_iter_end(&iter);
2053 
2054 	return ret;
2055 }
2056 
2057 static int dm_late_init(void *handle)
2058 {
2059 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2060 
2061 	struct dmcu_iram_parameters params;
2062 	unsigned int linear_lut[16];
2063 	int i;
2064 	struct dmcu *dmcu = NULL;
2065 
2066 	dmcu = adev->dm.dc->res_pool->dmcu;
2067 
2068 	for (i = 0; i < 16; i++)
2069 		linear_lut[i] = 0xFFFF * i / 15;
2070 
2071 	params.set = 0;
2072 	params.backlight_ramping_override = false;
2073 	params.backlight_ramping_start = 0xCCCC;
2074 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2075 	params.backlight_lut_array_size = 16;
2076 	params.backlight_lut_array = linear_lut;
2077 
	/* Min backlight level after ABM reduction; don't allow below 1%.
	 * 0xFFFF * 0.01 = 0x28F
	 */
2081 	params.min_abm_backlight = 0x28F;
	/*
	 * In the case where ABM is implemented on DMCUB,
	 * the DMCU object will be NULL.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
2086 	if (dmcu) {
2087 		if (!dmcu_load_iram(dmcu, params))
2088 			return -EINVAL;
2089 	} else if (adev->dm.dc->ctx->dmub_srv) {
2090 		struct dc_link *edp_links[MAX_NUM_EDP];
2091 		int edp_num;
2092 
2093 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2094 		for (i = 0; i < edp_num; i++) {
2095 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2096 				return -EINVAL;
2097 		}
2098 	}
2099 
2100 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2101 }
2102 
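/*
 * Suspend or resume MST topology managers across S3. If a manager fails to
 * resume, MST is disabled on that connector and a hotplug event is sent so
 * userspace can re-probe the topology.
 */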
2103 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2104 {
2105 	struct amdgpu_dm_connector *aconnector;
2106 	struct drm_connector *connector;
2107 	struct drm_connector_list_iter iter;
2108 	struct drm_dp_mst_topology_mgr *mgr;
2109 	int ret;
2110 	bool need_hotplug = false;
2111 
2112 	drm_connector_list_iter_begin(dev, &iter);
2113 	drm_for_each_connector_iter(connector, &iter) {
2114 		aconnector = to_amdgpu_dm_connector(connector);
2115 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2116 		    aconnector->mst_port)
2117 			continue;
2118 
2119 		mgr = &aconnector->mst_mgr;
2120 
2121 		if (suspend) {
2122 			drm_dp_mst_topology_mgr_suspend(mgr);
2123 		} else {
2124 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2125 			if (ret < 0) {
2126 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2127 				need_hotplug = true;
2128 			}
2129 		}
2130 	}
2131 	drm_connector_list_iter_end(&iter);
2132 
2133 	if (need_hotplug)
2134 		drm_kms_helper_hotplug_event(dev);
2135 }
2136 
2137 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2138 {
2139 	struct smu_context *smu = &adev->smu;
2140 	int ret = 0;
2141 
2142 	if (!is_support_sw_smu(adev))
2143 		return 0;
2144 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of dcn watermarks are fixed. The
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
2175 	switch (adev->ip_versions[DCE_HWIP][0]) {
2176 	case IP_VERSION(2, 0, 2):
2177 	case IP_VERSION(2, 0, 0):
2178 		break;
2179 	default:
2180 		return 0;
2181 	}
2182 
2183 	ret = smu_write_watermarks_table(smu);
2184 	if (ret) {
2185 		DRM_ERROR("Failed to update WMTABLE!\n");
2186 		return ret;
2187 	}
2188 
2189 	return 0;
2190 }
2191 
2192 /**
2193  * dm_hw_init() - Initialize DC device
2194  * @handle: The base driver device containing the amdgpu_dm device.
2195  *
2196  * Initialize the &struct amdgpu_display_manager device. This involves calling
2197  * the initializers of each DM component, then populating the struct with them.
2198  *
2199  * Although the function implies hardware initialization, both hardware and
2200  * software are initialized here. Splitting them out to their relevant init
2201  * hooks is a future TODO item.
2202  *
2203  * Some notable things that are initialized here:
2204  *
2205  * - Display Core, both software and hardware
2206  * - DC modules that we need (freesync and color management)
2207  * - DRM software states
2208  * - Interrupt sources and handlers
2209  * - Vblank support
2210  * - Debug FS entries, if enabled
2211  */
2212 static int dm_hw_init(void *handle)
2213 {
2214 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2215 	/* Create DAL display manager */
2216 	amdgpu_dm_init(adev);
2217 	amdgpu_dm_hpd_init(adev);
2218 
2219 	return 0;
2220 }
2221 
2222 /**
2223  * dm_hw_fini() - Teardown DC device
2224  * @handle: The base driver device containing the amdgpu_dm device.
2225  *
2226  * Teardown components within &struct amdgpu_display_manager that require
2227  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2228  * were loaded. Also flush IRQ workqueues and disable them.
2229  */
2230 static int dm_hw_fini(void *handle)
2231 {
2232 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2233 
2234 	amdgpu_dm_hpd_fini(adev);
2235 
2236 	amdgpu_dm_irq_fini(adev);
2237 	amdgpu_dm_fini(adev);
2238 	return 0;
2239 }
2240 
2241 
2242 static int dm_enable_vblank(struct drm_crtc *crtc);
2243 static void dm_disable_vblank(struct drm_crtc *crtc);
2244 
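/*
 * Enable or disable the pageflip and vblank interrupts for every stream in
 * the given DC state; used to quiesce and restore display IRQs around a GPU
 * reset.
 */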
2245 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2246 				 struct dc_state *state, bool enable)
2247 {
2248 	enum dc_irq_source irq_source;
2249 	struct amdgpu_crtc *acrtc;
2250 	int rc = -EBUSY;
2251 	int i = 0;
2252 
2253 	for (i = 0; i < state->stream_count; i++) {
2254 		acrtc = get_crtc_by_otg_inst(
2255 				adev, state->stream_status[i].primary_otg_inst);
2256 
2257 		if (acrtc && state->stream_status[i].plane_count != 0) {
2258 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2259 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2262 			if (rc)
2263 				DRM_WARN("Failed to %s pflip interrupts\n",
2264 					 enable ? "enable" : "disable");
2265 
2266 			if (enable) {
2267 				rc = dm_enable_vblank(&acrtc->base);
2268 				if (rc)
2269 					DRM_WARN("Failed to enable vblank interrupts\n");
2270 			} else {
2271 				dm_disable_vblank(&acrtc->base);
2272 			}
2273 
2274 		}
2275 	}
2276 
2277 }
2278 
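/*
 * Commit an empty stream configuration: copy the current DC state, strip all
 * planes and streams from the copy, and commit it so no streams remain active
 * (used while suspending during a GPU reset).
 */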
2279 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2280 {
2281 	struct dc_state *context = NULL;
2282 	enum dc_status res = DC_ERROR_UNEXPECTED;
2283 	int i;
2284 	struct dc_stream_state *del_streams[MAX_PIPES];
2285 	int del_streams_count = 0;
2286 
2287 	memset(del_streams, 0, sizeof(del_streams));
2288 
2289 	context = dc_create_state(dc);
2290 	if (context == NULL)
2291 		goto context_alloc_fail;
2292 
2293 	dc_resource_state_copy_construct_current(dc, context);
2294 
2295 	/* First remove from context all streams */
2296 	for (i = 0; i < context->stream_count; i++) {
2297 		struct dc_stream_state *stream = context->streams[i];
2298 
2299 		del_streams[del_streams_count++] = stream;
2300 	}
2301 
2302 	/* Remove all planes for removed streams and then remove the streams */
2303 	for (i = 0; i < del_streams_count; i++) {
2304 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2305 			res = DC_FAIL_DETACH_SURFACES;
2306 			goto fail;
2307 		}
2308 
2309 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2310 		if (res != DC_OK)
2311 			goto fail;
2312 	}
2313 
2314 	res = dc_commit_state(dc, context);
2315 
2316 fail:
2317 	dc_release_state(context);
2318 
2319 context_alloc_fail:
2320 	return res;
2321 }
2322 
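/* Flush any pending HPD RX offload work before suspending. */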
2323 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2324 {
2325 	int i;
2326 
2327 	if (dm->hpd_rx_offload_wq) {
2328 		for (i = 0; i < dm->dc->caps.max_links; i++)
2329 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2330 	}
2331 }
2332 
2333 static int dm_suspend(void *handle)
2334 {
2335 	struct amdgpu_device *adev = handle;
2336 	struct amdgpu_display_manager *dm = &adev->dm;
2337 	int ret = 0;
2338 
2339 	if (amdgpu_in_reset(adev)) {
2340 		mutex_lock(&dm->dc_lock);
2341 
2342 #if defined(CONFIG_DRM_AMD_DC_DCN)
2343 		dc_allow_idle_optimizations(adev->dm.dc, false);
2344 #endif
2345 
2346 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2347 
2348 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2349 
2350 		amdgpu_dm_commit_zero_streams(dm->dc);
2351 
2352 		amdgpu_dm_irq_suspend(adev);
2353 
2354 		hpd_rx_irq_work_suspend(dm);
2355 
2356 		return ret;
2357 	}
2358 
2359 	WARN_ON(adev->dm.cached_state);
2360 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2361 
2362 	s3_handle_mst(adev_to_drm(adev), true);
2363 
2364 	amdgpu_dm_irq_suspend(adev);
2365 
2366 	hpd_rx_irq_work_suspend(dm);
2367 
2368 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2369 
2370 	return 0;
2371 }
2372 
2373 static struct amdgpu_dm_connector *
2374 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2375 					     struct drm_crtc *crtc)
2376 {
2377 	uint32_t i;
2378 	struct drm_connector_state *new_con_state;
2379 	struct drm_connector *connector;
2380 	struct drm_crtc *crtc_from_state;
2381 
2382 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2383 		crtc_from_state = new_con_state->crtc;
2384 
2385 		if (crtc_from_state == crtc)
2386 			return to_amdgpu_dm_connector(connector);
2387 	}
2388 
2389 	return NULL;
2390 }
2391 
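/*
 * Emulate a link detection for connectors that are forced on without a
 * physical sink: create a sink matching the connector's signal type and
 * attempt to read a local (emulated) EDID for it.
 */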
2392 static void emulated_link_detect(struct dc_link *link)
2393 {
2394 	struct dc_sink_init_data sink_init_data = { 0 };
2395 	struct display_sink_capability sink_caps = { 0 };
2396 	enum dc_edid_status edid_status;
2397 	struct dc_context *dc_ctx = link->ctx;
2398 	struct dc_sink *sink = NULL;
2399 	struct dc_sink *prev_sink = NULL;
2400 
2401 	link->type = dc_connection_none;
2402 	prev_sink = link->local_sink;
2403 
2404 	if (prev_sink)
2405 		dc_sink_release(prev_sink);
2406 
2407 	switch (link->connector_signal) {
2408 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2409 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2410 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2411 		break;
2412 	}
2413 
2414 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2415 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2416 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2417 		break;
2418 	}
2419 
2420 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2421 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2422 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2423 		break;
2424 	}
2425 
2426 	case SIGNAL_TYPE_LVDS: {
2427 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2428 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2429 		break;
2430 	}
2431 
2432 	case SIGNAL_TYPE_EDP: {
2433 		sink_caps.transaction_type =
2434 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2435 		sink_caps.signal = SIGNAL_TYPE_EDP;
2436 		break;
2437 	}
2438 
2439 	case SIGNAL_TYPE_DISPLAY_PORT: {
2440 		sink_caps.transaction_type =
2441 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2442 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2443 		break;
2444 	}
2445 
2446 	default:
2447 		DC_ERROR("Invalid connector type! signal:%d\n",
2448 			link->connector_signal);
2449 		return;
2450 	}
2451 
2452 	sink_init_data.link = link;
2453 	sink_init_data.sink_signal = sink_caps.signal;
2454 
2455 	sink = dc_sink_create(&sink_init_data);
2456 	if (!sink) {
2457 		DC_ERROR("Failed to create sink!\n");
2458 		return;
2459 	}
2460 
2461 	/* dc_sink_create returns a new reference */
2462 	link->local_sink = sink;
2463 
2464 	edid_status = dm_helpers_read_local_edid(
2465 			link->ctx,
2466 			link,
2467 			sink);
2468 
2469 	if (edid_status != EDID_OK)
2470 		DC_ERROR("Failed to read EDID");
2471 
2472 }
2473 
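/*
 * After a GPU reset, re-commit every stream in the cached DC state with all
 * of its surfaces flagged for a full update so the hardware state is rebuilt.
 */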
2474 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2475 				     struct amdgpu_display_manager *dm)
2476 {
2477 	struct {
2478 		struct dc_surface_update surface_updates[MAX_SURFACES];
2479 		struct dc_plane_info plane_infos[MAX_SURFACES];
2480 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2481 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2482 		struct dc_stream_update stream_update;
	} *bundle;
2484 	int k, m;
2485 
2486 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2487 
2488 	if (!bundle) {
2489 		dm_error("Failed to allocate update bundle\n");
2490 		goto cleanup;
2491 	}
2492 
2493 	for (k = 0; k < dc_state->stream_count; k++) {
2494 		bundle->stream_update.stream = dc_state->streams[k];
2495 
2496 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2497 			bundle->surface_updates[m].surface =
2498 				dc_state->stream_status->plane_states[m];
2499 			bundle->surface_updates[m].surface->force_full_update =
2500 				true;
2501 		}
2502 		dc_commit_updates_for_stream(
2503 			dm->dc, bundle->surface_updates,
2504 			dc_state->stream_status->plane_count,
2505 			dc_state->streams[k], &bundle->stream_update, dc_state);
2506 	}
2507 
2508 cleanup:
2509 	kfree(bundle);
2510 
2511 	return;
2512 }
2513 
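/* Turn off the stream on the given link by committing a dpms_off stream update. */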
2514 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2515 {
2516 	struct dc_stream_state *stream_state;
2517 	struct amdgpu_dm_connector *aconnector = link->priv;
2518 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2519 	struct dc_stream_update stream_update;
2520 	bool dpms_off = true;
2521 
2522 	memset(&stream_update, 0, sizeof(stream_update));
2523 	stream_update.dpms_off = &dpms_off;
2524 
2525 	mutex_lock(&adev->dm.dc_lock);
2526 	stream_state = dc_stream_find_from_link(link);
2527 
2528 	if (stream_state == NULL) {
2529 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2530 		mutex_unlock(&adev->dm.dc_lock);
2531 		return;
2532 	}
2533 
2534 	stream_update.stream = stream_state;
2535 	acrtc_state->force_dpms_off = true;
2536 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2537 				     stream_state, &stream_update,
2538 				     stream_state->ctx->dc->current_state);
2539 	mutex_unlock(&adev->dm.dc_lock);
2540 }
2541 
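/*
 * Resume handler: in the GPU-reset path the cached DC state is re-committed
 * and interrupts are restored; on a normal S3 resume DMUB and DC are
 * re-initialized, links are re-detected and the cached atomic state is
 * restored.
 */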
2542 static int dm_resume(void *handle)
2543 {
2544 	struct amdgpu_device *adev = handle;
2545 	struct drm_device *ddev = adev_to_drm(adev);
2546 	struct amdgpu_display_manager *dm = &adev->dm;
2547 	struct amdgpu_dm_connector *aconnector;
2548 	struct drm_connector *connector;
2549 	struct drm_connector_list_iter iter;
2550 	struct drm_crtc *crtc;
2551 	struct drm_crtc_state *new_crtc_state;
2552 	struct dm_crtc_state *dm_new_crtc_state;
2553 	struct drm_plane *plane;
2554 	struct drm_plane_state *new_plane_state;
2555 	struct dm_plane_state *dm_new_plane_state;
2556 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2557 	enum dc_connection_type new_connection_type = dc_connection_none;
2558 	struct dc_state *dc_state;
2559 	int i, r, j;
2560 
2561 	if (amdgpu_in_reset(adev)) {
2562 		dc_state = dm->cached_dc_state;
2563 
2564 		/*
2565 		 * The dc->current_state is backed up into dm->cached_dc_state
2566 		 * before we commit 0 streams.
2567 		 *
2568 		 * DC will clear link encoder assignments on the real state
2569 		 * but the changes won't propagate over to the copy we made
2570 		 * before the 0 streams commit.
2571 		 *
2572 		 * DC expects that link encoder assignments are *not* valid
2573 		 * when committing a state, so as a workaround it needs to be
2574 		 * cleared here.
2575 		 */
2576 		link_enc_cfg_init(dm->dc, dc_state);
2577 
2578 		if (dc_enable_dmub_notifications(adev->dm.dc))
2579 			amdgpu_dm_outbox_init(adev);
2580 
2581 		r = dm_dmub_hw_init(adev);
2582 		if (r)
2583 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2584 
2585 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2586 		dc_resume(dm->dc);
2587 
2588 		amdgpu_dm_irq_resume_early(adev);
2589 
2590 		for (i = 0; i < dc_state->stream_count; i++) {
2591 			dc_state->streams[i]->mode_changed = true;
2592 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2593 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2594 					= 0xffffffff;
2595 			}
2596 		}
2597 
2598 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2599 
2600 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2601 
2602 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2603 
2604 		dc_release_state(dm->cached_dc_state);
2605 		dm->cached_dc_state = NULL;
2606 
2607 		amdgpu_dm_irq_resume_late(adev);
2608 
2609 		mutex_unlock(&dm->dc_lock);
2610 
2611 		return 0;
2612 	}
2613 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2614 	dc_release_state(dm_state->context);
2615 	dm_state->context = dc_create_state(dm->dc);
2616 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2617 	dc_resource_state_construct(dm->dc, dm_state->context);
2618 
2619 	/* Re-enable outbox interrupts for DPIA. */
2620 	if (dc_enable_dmub_notifications(adev->dm.dc))
2621 		amdgpu_dm_outbox_init(adev);
2622 
2623 	/* Before powering on DC we need to re-initialize DMUB. */
2624 	r = dm_dmub_hw_init(adev);
2625 	if (r)
2626 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2627 
2628 	/* power on hardware */
2629 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2630 
2631 	/* program HPD filter */
2632 	dc_resume(dm->dc);
2633 
2634 	/*
2635 	 * early enable HPD Rx IRQ, should be done before set mode as short
2636 	 * pulse interrupts are used for MST
2637 	 */
2638 	amdgpu_dm_irq_resume_early(adev);
2639 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2641 	s3_handle_mst(ddev, false);
2642 
	/* Do detection */
2644 	drm_connector_list_iter_begin(ddev, &iter);
2645 	drm_for_each_connector_iter(connector, &iter) {
2646 		aconnector = to_amdgpu_dm_connector(connector);
2647 
2648 		/*
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
2651 		 */
2652 		if (aconnector->mst_port)
2653 			continue;
2654 
2655 		mutex_lock(&aconnector->hpd_lock);
2656 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2657 			DRM_ERROR("KMS: Failed to detect connector\n");
2658 
2659 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2660 			emulated_link_detect(aconnector->dc_link);
2661 		else
2662 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2663 
2664 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2665 			aconnector->fake_enable = false;
2666 
2667 		if (aconnector->dc_sink)
2668 			dc_sink_release(aconnector->dc_sink);
2669 		aconnector->dc_sink = NULL;
2670 		amdgpu_dm_update_connector_after_detect(aconnector);
2671 		mutex_unlock(&aconnector->hpd_lock);
2672 	}
2673 	drm_connector_list_iter_end(&iter);
2674 
2675 	/* Force mode set in atomic commit */
2676 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2677 		new_crtc_state->active_changed = true;
2678 
2679 	/*
2680 	 * atomic_check is expected to create the dc states. We need to release
2681 	 * them here, since they were duplicated as part of the suspend
2682 	 * procedure.
2683 	 */
2684 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2685 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2686 		if (dm_new_crtc_state->stream) {
2687 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2688 			dc_stream_release(dm_new_crtc_state->stream);
2689 			dm_new_crtc_state->stream = NULL;
2690 		}
2691 	}
2692 
2693 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2694 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2695 		if (dm_new_plane_state->dc_state) {
2696 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2697 			dc_plane_state_release(dm_new_plane_state->dc_state);
2698 			dm_new_plane_state->dc_state = NULL;
2699 		}
2700 	}
2701 
2702 	drm_atomic_helper_resume(ddev, dm->cached_state);
2703 
2704 	dm->cached_state = NULL;
2705 
2706 	amdgpu_dm_irq_resume_late(adev);
2707 
2708 	amdgpu_dm_smu_write_watermarks_table(adev);
2709 
2710 	return 0;
2711 }
2712 
2713 /**
2714  * DOC: DM Lifecycle
2715  *
2716  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2717  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2718  * the base driver's device list to be initialized and torn down accordingly.
2719  *
2720  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2721  */
2722 
2723 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2724 	.name = "dm",
2725 	.early_init = dm_early_init,
2726 	.late_init = dm_late_init,
2727 	.sw_init = dm_sw_init,
2728 	.sw_fini = dm_sw_fini,
2729 	.early_fini = amdgpu_dm_early_fini,
2730 	.hw_init = dm_hw_init,
2731 	.hw_fini = dm_hw_fini,
2732 	.suspend = dm_suspend,
2733 	.resume = dm_resume,
2734 	.is_idle = dm_is_idle,
2735 	.wait_for_idle = dm_wait_for_idle,
2736 	.check_soft_reset = dm_check_soft_reset,
2737 	.soft_reset = dm_soft_reset,
2738 	.set_clockgating_state = dm_set_clockgating_state,
2739 	.set_powergating_state = dm_set_powergating_state,
2740 };
2741 
2742 const struct amdgpu_ip_block_version dm_ip_block =
2743 {
2744 	.type = AMD_IP_BLOCK_TYPE_DCE,
2745 	.major = 1,
2746 	.minor = 0,
2747 	.rev = 0,
2748 	.funcs = &amdgpu_dm_funcs,
2749 };
2750 
2751 
2752 /**
2753  * DOC: atomic
2754  *
2755  * *WIP*
2756  */
2757 
2758 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2759 	.fb_create = amdgpu_display_user_framebuffer_create,
2760 	.get_format_info = amd_get_format_info,
2761 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2762 	.atomic_check = amdgpu_dm_atomic_check,
2763 	.atomic_commit = drm_atomic_helper_commit,
2764 };
2765 
2766 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2767 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2768 };
2769 
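/*
 * Refresh the eDP backlight capabilities from the sink's extended DPCD caps,
 * deriving the AUX min/max input signal levels from the connector's
 * max_cll/min_cll HDR metadata.
 */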
2770 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2771 {
2772 	u32 max_cll, min_cll, max, min, q, r;
2773 	struct amdgpu_dm_backlight_caps *caps;
2774 	struct amdgpu_display_manager *dm;
2775 	struct drm_connector *conn_base;
2776 	struct amdgpu_device *adev;
2777 	struct dc_link *link = NULL;
2778 	static const u8 pre_computed_values[] = {
2779 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2780 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2781 	int i;
2782 
2783 	if (!aconnector || !aconnector->dc_link)
2784 		return;
2785 
2786 	link = aconnector->dc_link;
2787 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2788 		return;
2789 
2790 	conn_base = &aconnector->base;
2791 	adev = drm_to_adev(conn_base->dev);
2792 	dm = &adev->dm;
2793 	for (i = 0; i < dm->num_of_edps; i++) {
2794 		if (link == dm->backlight_link[i])
2795 			break;
2796 	}
2797 	if (i >= dm->num_of_edps)
2798 		return;
2799 	caps = &dm->backlight_caps[i];
2800 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2801 	caps->aux_support = false;
2802 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2803 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2804 
2805 	if (caps->ext_caps->bits.oled == 1 /*||
2806 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2807 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2808 		caps->aux_support = true;
2809 
2810 	if (amdgpu_backlight == 0)
2811 		caps->aux_support = false;
2812 	else if (amdgpu_backlight == 1)
2813 		caps->aux_support = true;
2814 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need floating-point
	 * precision; to avoid this complexity level, we take advantage of the
	 * fact that CV is divided by a constant. From Euclid's division
	 * algorithm, we know that CV can be written as: CV = 32*q + r. Next,
	 * we replace CV in the Luminance expression and get
	 * 50*(2**q)*(2**(r/32)), hence we just need to pre-compute the value
	 * of r/32. For pre-computing the values we just used the following
	 * Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified against
	 * pre_computed_values.
	 */
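	/*
	 * Worked example (illustrative only): with max_cll = 0x41 (65), the
	 * computation below gives q = 2 and r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, matching
	 * 50 * 2**(65/32) ~= 204 from the formula above.
	 */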
2830 	q = max_cll >> 5;
2831 	r = max_cll % 32;
2832 	max = (1 << q) * pre_computed_values[r];
2833 
2834 	// min luminance: maxLum * (CV/255)^2 / 100
2835 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2836 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2837 
2838 	caps->aux_max_input_signal = max;
2839 	caps->aux_min_input_signal = min;
2840 }
2841 
2842 void amdgpu_dm_update_connector_after_detect(
2843 		struct amdgpu_dm_connector *aconnector)
2844 {
2845 	struct drm_connector *connector = &aconnector->base;
2846 	struct drm_device *dev = connector->dev;
2847 	struct dc_sink *sink;
2848 
2849 	/* MST handled by drm_mst framework */
2850 	if (aconnector->mst_mgr.mst_state == true)
2851 		return;
2852 
2853 	sink = aconnector->dc_link->local_sink;
2854 	if (sink)
2855 		dc_sink_retain(sink);
2856 
2857 	/*
	 * Edid mgmt connector gets its first update only in the mode_valid
	 * hook, and then the connector sink is set to either a fake or a
	 * physical sink depending on link status.
2860 	 * Skip if already done during boot.
2861 	 */
2862 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2863 			&& aconnector->dc_em_sink) {
2864 
2865 		/*
		 * For S3 resume with a headless configuration, use the
		 * emulated sink (dc_em_sink) to fake the stream, because on
		 * resume connector->sink is set to NULL.
2868 		 */
2869 		mutex_lock(&dev->mode_config.mutex);
2870 
2871 		if (sink) {
2872 			if (aconnector->dc_sink) {
2873 				amdgpu_dm_update_freesync_caps(connector, NULL);
2874 				/*
				 * The retain and release below bump up the
				 * refcount for the sink because the link no
				 * longer points to it after disconnect;
				 * otherwise, on the next crtc to connector
				 * reshuffle by UMD we would hit an unwanted
				 * dc_sink release.
2879 				 */
2880 				dc_sink_release(aconnector->dc_sink);
2881 			}
2882 			aconnector->dc_sink = sink;
2883 			dc_sink_retain(aconnector->dc_sink);
2884 			amdgpu_dm_update_freesync_caps(connector,
2885 					aconnector->edid);
2886 		} else {
2887 			amdgpu_dm_update_freesync_caps(connector, NULL);
2888 			if (!aconnector->dc_sink) {
2889 				aconnector->dc_sink = aconnector->dc_em_sink;
2890 				dc_sink_retain(aconnector->dc_sink);
2891 			}
2892 		}
2893 
2894 		mutex_unlock(&dev->mode_config.mutex);
2895 
2896 		if (sink)
2897 			dc_sink_release(sink);
2898 		return;
2899 	}
2900 
2901 	/*
	 * TODO: temporary guard while looking for a proper fix.
	 * If this sink is an MST sink, we should not do anything.
2904 	 */
2905 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2906 		dc_sink_release(sink);
2907 		return;
2908 	}
2909 
2910 	if (aconnector->dc_sink == sink) {
2911 		/*
2912 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2913 		 * Do nothing!!
2914 		 */
2915 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2916 				aconnector->connector_id);
2917 		if (sink)
2918 			dc_sink_release(sink);
2919 		return;
2920 	}
2921 
2922 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2923 		aconnector->connector_id, aconnector->dc_sink, sink);
2924 
2925 	mutex_lock(&dev->mode_config.mutex);
2926 
2927 	/*
2928 	 * 1. Update status of the drm connector
2929 	 * 2. Send an event and let userspace tell us what to do
2930 	 */
2931 	if (sink) {
2932 		/*
2933 		 * TODO: check if we still need the S3 mode update workaround.
2934 		 * If yes, put it here.
2935 		 */
2936 		if (aconnector->dc_sink) {
2937 			amdgpu_dm_update_freesync_caps(connector, NULL);
2938 			dc_sink_release(aconnector->dc_sink);
2939 		}
2940 
2941 		aconnector->dc_sink = sink;
2942 		dc_sink_retain(aconnector->dc_sink);
2943 		if (sink->dc_edid.length == 0) {
2944 			aconnector->edid = NULL;
2945 			if (aconnector->dc_link->aux_mode) {
2946 				drm_dp_cec_unset_edid(
2947 					&aconnector->dm_dp_aux.aux);
2948 			}
2949 		} else {
2950 			aconnector->edid =
2951 				(struct edid *)sink->dc_edid.raw_edid;
2952 
2953 			drm_connector_update_edid_property(connector,
2954 							   aconnector->edid);
2955 			if (aconnector->dc_link->aux_mode)
2956 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2957 						    aconnector->edid);
2958 		}
2959 
2960 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2961 		update_connector_ext_caps(aconnector);
2962 	} else {
2963 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2964 		amdgpu_dm_update_freesync_caps(connector, NULL);
2965 		drm_connector_update_edid_property(connector, NULL);
2966 		aconnector->num_modes = 0;
2967 		dc_sink_release(aconnector->dc_sink);
2968 		aconnector->dc_sink = NULL;
2969 		aconnector->edid = NULL;
2970 #ifdef CONFIG_DRM_AMD_DC_HDCP
2971 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2972 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2973 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2974 #endif
2975 	}
2976 
2977 	mutex_unlock(&dev->mode_config.mutex);
2978 
2979 	update_subconnector_property(aconnector);
2980 
2981 	if (sink)
2982 		dc_sink_release(sink);
2983 }
2984 
2985 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2986 {
2987 	struct drm_connector *connector = &aconnector->base;
2988 	struct drm_device *dev = connector->dev;
2989 	enum dc_connection_type new_connection_type = dc_connection_none;
2990 	struct amdgpu_device *adev = drm_to_adev(dev);
2991 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2992 	struct dm_crtc_state *dm_crtc_state = NULL;
2993 
2994 	if (adev->dm.disable_hpd_irq)
2995 		return;
2996 
2997 	if (dm_con_state->base.state && dm_con_state->base.crtc)
2998 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2999 					dm_con_state->base.state,
3000 					dm_con_state->base.crtc));
3001 	/*
	 * In case of failure or MST, there is no need to update the connector
	 * status or notify the OS, since (for the MST case) MST does this in
	 * its own context.
3004 	 */
3005 	mutex_lock(&aconnector->hpd_lock);
3006 
3007 #ifdef CONFIG_DRM_AMD_DC_HDCP
3008 	if (adev->dm.hdcp_workqueue) {
3009 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3010 		dm_con_state->update_hdcp = true;
3011 	}
3012 #endif
3013 	if (aconnector->fake_enable)
3014 		aconnector->fake_enable = false;
3015 
3016 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3017 		DRM_ERROR("KMS: Failed to detect connector\n");
3018 
3019 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3020 		emulated_link_detect(aconnector->dc_link);
3021 
3022 		drm_modeset_lock_all(dev);
3023 		dm_restore_drm_connector_state(dev, connector);
3024 		drm_modeset_unlock_all(dev);
3025 
3026 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3027 			drm_kms_helper_connector_hotplug_event(connector);
3028 
3029 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3030 		if (new_connection_type == dc_connection_none &&
3031 		    aconnector->dc_link->type == dc_connection_none &&
3032 		    dm_crtc_state)
3033 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3034 
3035 		amdgpu_dm_update_connector_after_detect(aconnector);
3036 
3037 		drm_modeset_lock_all(dev);
3038 		dm_restore_drm_connector_state(dev, connector);
3039 		drm_modeset_unlock_all(dev);
3040 
3041 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3042 			drm_kms_helper_connector_hotplug_event(connector);
3043 	}
3044 	mutex_unlock(&aconnector->hpd_lock);
3045 
3046 }
3047 
3048 static void handle_hpd_irq(void *param)
3049 {
3050 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3051 
3052 	handle_hpd_irq_helper(aconnector);
3053 
3054 }
3055 
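/*
 * Service MST sideband messages signalled by a DP short pulse: read the
 * ESI/sink-count DPCD bytes, let the MST manager handle the IRQ, ACK it back
 * to the sink and repeat until no new IRQ is pending (bounded by a retry cap).
 */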
3056 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3057 {
3058 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3059 	uint8_t dret;
3060 	bool new_irq_handled = false;
3061 	int dpcd_addr;
3062 	int dpcd_bytes_to_read;
3063 
3064 	const int max_process_count = 30;
3065 	int process_count = 0;
3066 
3067 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3068 
3069 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3070 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3071 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3072 		dpcd_addr = DP_SINK_COUNT;
3073 	} else {
3074 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3075 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3076 		dpcd_addr = DP_SINK_COUNT_ESI;
3077 	}
3078 
3079 	dret = drm_dp_dpcd_read(
3080 		&aconnector->dm_dp_aux.aux,
3081 		dpcd_addr,
3082 		esi,
3083 		dpcd_bytes_to_read);
3084 
3085 	while (dret == dpcd_bytes_to_read &&
3086 		process_count < max_process_count) {
3087 		uint8_t retry;
3088 		dret = 0;
3089 
3090 		process_count++;
3091 
3092 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3093 		/* handle HPD short pulse irq */
3094 		if (aconnector->mst_mgr.mst_state)
3095 			drm_dp_mst_hpd_irq(
3096 				&aconnector->mst_mgr,
3097 				esi,
3098 				&new_irq_handled);
3099 
3100 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
3102 			const int ack_dpcd_bytes_to_write =
3103 				dpcd_bytes_to_read - 1;
3104 
3105 			for (retry = 0; retry < 3; retry++) {
3106 				uint8_t wret;
3107 
3108 				wret = drm_dp_dpcd_write(
3109 					&aconnector->dm_dp_aux.aux,
3110 					dpcd_addr + 1,
3111 					&esi[1],
3112 					ack_dpcd_bytes_to_write);
3113 				if (wret == ack_dpcd_bytes_to_write)
3114 					break;
3115 			}
3116 
3117 			/* check if there is new irq to be handled */
3118 			dret = drm_dp_dpcd_read(
3119 				&aconnector->dm_dp_aux.aux,
3120 				dpcd_addr,
3121 				esi,
3122 				dpcd_bytes_to_read);
3123 
3124 			new_irq_handled = false;
3125 		} else {
3126 			break;
3127 		}
3128 	}
3129 
3130 	if (process_count == max_process_count)
3131 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3132 }
3133 
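/* Queue HPD RX IRQ data onto the per-link offload workqueue for deferred handling. */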
3134 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3135 							union hpd_irq_data hpd_irq_data)
3136 {
3137 	struct hpd_rx_irq_offload_work *offload_work =
3138 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3139 
3140 	if (!offload_work) {
3141 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3142 		return;
3143 	}
3144 
3145 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3146 	offload_work->data = hpd_irq_data;
3147 	offload_work->offload_wq = offload_wq;
3148 
3149 	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queued work to handle hpd_rx offload work\n");
3151 }
3152 
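/*
 * Handler for DP short-pulse (HPD RX) interrupts: let DC decode the IRQ data,
 * offload automated-test and link-loss handling to the workqueue, process MST
 * sideband messages, and fall back to a full sink re-detection for non-MST
 * downstream port changes.
 */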
3153 static void handle_hpd_rx_irq(void *param)
3154 {
3155 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3156 	struct drm_connector *connector = &aconnector->base;
3157 	struct drm_device *dev = connector->dev;
3158 	struct dc_link *dc_link = aconnector->dc_link;
3159 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3160 	bool result = false;
3161 	enum dc_connection_type new_connection_type = dc_connection_none;
3162 	struct amdgpu_device *adev = drm_to_adev(dev);
3163 	union hpd_irq_data hpd_irq_data;
3164 	bool link_loss = false;
3165 	bool has_left_work = false;
3166 	int idx = aconnector->base.index;
3167 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3168 
3169 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3170 
3171 	if (adev->dm.disable_hpd_irq)
3172 		return;
3173 
3174 	/*
	 * TODO: Temporarily add a mutex to prevent the hpd interrupt from
	 * hitting a gpio conflict; once the i2c helper is implemented, this
	 * mutex should be retired.
3178 	 */
3179 	mutex_lock(&aconnector->hpd_lock);
3180 
3181 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3182 						&link_loss, true, &has_left_work);
3183 
3184 	if (!has_left_work)
3185 		goto out;
3186 
3187 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3188 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3189 		goto out;
3190 	}
3191 
3192 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3193 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3194 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3195 			dm_handle_mst_sideband_msg(aconnector);
3196 			goto out;
3197 		}
3198 
3199 		if (link_loss) {
3200 			bool skip = false;
3201 
3202 			spin_lock(&offload_wq->offload_lock);
3203 			skip = offload_wq->is_handling_link_loss;
3204 
3205 			if (!skip)
3206 				offload_wq->is_handling_link_loss = true;
3207 
3208 			spin_unlock(&offload_wq->offload_lock);
3209 
3210 			if (!skip)
3211 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3212 
3213 			goto out;
3214 		}
3215 	}
3216 
3217 out:
3218 	if (result && !is_mst_root_connector) {
3219 		/* Downstream Port status changed. */
3220 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3221 			DRM_ERROR("KMS: Failed to detect connector\n");
3222 
3223 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3224 			emulated_link_detect(dc_link);
3225 
3226 			if (aconnector->fake_enable)
3227 				aconnector->fake_enable = false;
3228 
3229 			amdgpu_dm_update_connector_after_detect(aconnector);
3230 
3231 
3232 			drm_modeset_lock_all(dev);
3233 			dm_restore_drm_connector_state(dev, connector);
3234 			drm_modeset_unlock_all(dev);
3235 
3236 			drm_kms_helper_connector_hotplug_event(connector);
3237 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3238 
3239 			if (aconnector->fake_enable)
3240 				aconnector->fake_enable = false;
3241 
3242 			amdgpu_dm_update_connector_after_detect(aconnector);
3243 
3244 
3245 			drm_modeset_lock_all(dev);
3246 			dm_restore_drm_connector_state(dev, connector);
3247 			drm_modeset_unlock_all(dev);
3248 
3249 			drm_kms_helper_connector_hotplug_event(connector);
3250 		}
3251 	}
3252 #ifdef CONFIG_DRM_AMD_DC_HDCP
3253 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3254 		if (adev->dm.hdcp_workqueue)
3255 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3256 	}
3257 #endif
3258 
3259 	if (dc_link->type != dc_connection_mst_branch)
3260 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3261 
3262 	mutex_unlock(&aconnector->hpd_lock);
3263 }
3264 
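/*
 * Walk all connectors and register low-context handlers for their HPD and
 * HPD RX (DP short pulse) interrupt sources with the DM interrupt service.
 */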
3265 static void register_hpd_handlers(struct amdgpu_device *adev)
3266 {
3267 	struct drm_device *dev = adev_to_drm(adev);
3268 	struct drm_connector *connector;
3269 	struct amdgpu_dm_connector *aconnector;
3270 	const struct dc_link *dc_link;
3271 	struct dc_interrupt_params int_params = {0};
3272 
3273 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3274 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3275 
3276 	list_for_each_entry(connector,
3277 			&dev->mode_config.connector_list, head)	{
3278 
3279 		aconnector = to_amdgpu_dm_connector(connector);
3280 		dc_link = aconnector->dc_link;
3281 
3282 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3283 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3284 			int_params.irq_source = dc_link->irq_source_hpd;
3285 
3286 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3287 					handle_hpd_irq,
3288 					(void *) aconnector);
3289 		}
3290 
3291 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3292 
3293 			/* Also register for DP short pulse (hpd_rx). */
3294 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3295 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3296 
3297 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3298 					handle_hpd_rx_irq,
3299 					(void *) aconnector);
3300 
3301 			if (adev->dm.hpd_rx_offload_wq)
3302 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3303 					aconnector;
3304 		}
3305 	}
3306 }
3307 
3308 #if defined(CONFIG_DRM_AMD_DC_SI)
3309 /* Register IRQ sources and initialize IRQ callbacks */
3310 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3311 {
3312 	struct dc *dc = adev->dm.dc;
3313 	struct common_irq_params *c_irq_params;
3314 	struct dc_interrupt_params int_params = {0};
3315 	int r;
3316 	int i;
3317 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3318 
3319 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3320 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3321 
3322 	/*
3323 	 * Actions of amdgpu_irq_add_id():
3324 	 * 1. Register a set() function with base driver.
3325 	 *    Base driver will call set() function to enable/disable an
3326 	 *    interrupt in DC hardware.
3327 	 * 2. Register amdgpu_dm_irq_handler().
3328 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3329 	 *    coming from DC hardware.
3330 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3331 	 *    for acknowledging and handling. */
3332 
3333 	/* Use VBLANK interrupt */
3334 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3336 		if (r) {
3337 			DRM_ERROR("Failed to add crtc irq id!\n");
3338 			return r;
3339 		}
3340 
3341 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3342 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
3344 
3345 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3346 
3347 		c_irq_params->adev = adev;
3348 		c_irq_params->irq_src = int_params.irq_source;
3349 
3350 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3351 				dm_crtc_high_irq, c_irq_params);
3352 	}
3353 
3354 	/* Use GRPH_PFLIP interrupt */
3355 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3356 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3357 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3358 		if (r) {
3359 			DRM_ERROR("Failed to add page flip irq id!\n");
3360 			return r;
3361 		}
3362 
3363 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3364 		int_params.irq_source =
3365 			dc_interrupt_to_irq_source(dc, i, 0);
3366 
3367 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3368 
3369 		c_irq_params->adev = adev;
3370 		c_irq_params->irq_src = int_params.irq_source;
3371 
3372 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3373 				dm_pflip_high_irq, c_irq_params);
3374 
3375 	}
3376 
3377 	/* HPD */
3378 	r = amdgpu_irq_add_id(adev, client_id,
3379 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3380 	if (r) {
3381 		DRM_ERROR("Failed to add hpd irq id!\n");
3382 		return r;
3383 	}
3384 
3385 	register_hpd_handlers(adev);
3386 
3387 	return 0;
3388 }
3389 #endif
3390 
3391 /* Register IRQ sources and initialize IRQ callbacks */
3392 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3393 {
3394 	struct dc *dc = adev->dm.dc;
3395 	struct common_irq_params *c_irq_params;
3396 	struct dc_interrupt_params int_params = {0};
3397 	int r;
3398 	int i;
3399 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3400 
3401 	if (adev->family >= AMDGPU_FAMILY_AI)
3402 		client_id = SOC15_IH_CLIENTID_DCE;
3403 
3404 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3405 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3406 
3407 	/*
3408 	 * Actions of amdgpu_irq_add_id():
3409 	 * 1. Register a set() function with base driver.
3410 	 *    Base driver will call set() function to enable/disable an
3411 	 *    interrupt in DC hardware.
3412 	 * 2. Register amdgpu_dm_irq_handler().
3413 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3414 	 *    coming from DC hardware.
3415 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3416 	 *    for acknowledging and handling. */
3417 
3418 	/* Use VBLANK interrupt */
3419 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3420 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3421 		if (r) {
3422 			DRM_ERROR("Failed to add crtc irq id!\n");
3423 			return r;
3424 		}
3425 
3426 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3427 		int_params.irq_source =
3428 			dc_interrupt_to_irq_source(dc, i, 0);
3429 
3430 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3431 
3432 		c_irq_params->adev = adev;
3433 		c_irq_params->irq_src = int_params.irq_source;
3434 
3435 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3436 				dm_crtc_high_irq, c_irq_params);
3437 	}
3438 
3439 	/* Use VUPDATE interrupt */
3440 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3441 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3442 		if (r) {
3443 			DRM_ERROR("Failed to add vupdate irq id!\n");
3444 			return r;
3445 		}
3446 
3447 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3448 		int_params.irq_source =
3449 			dc_interrupt_to_irq_source(dc, i, 0);
3450 
3451 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3452 
3453 		c_irq_params->adev = adev;
3454 		c_irq_params->irq_src = int_params.irq_source;
3455 
3456 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3457 				dm_vupdate_high_irq, c_irq_params);
3458 	}
3459 
3460 	/* Use GRPH_PFLIP interrupt */
3461 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3462 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3463 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3464 		if (r) {
3465 			DRM_ERROR("Failed to add page flip irq id!\n");
3466 			return r;
3467 		}
3468 
3469 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3470 		int_params.irq_source =
3471 			dc_interrupt_to_irq_source(dc, i, 0);
3472 
3473 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3474 
3475 		c_irq_params->adev = adev;
3476 		c_irq_params->irq_src = int_params.irq_source;
3477 
3478 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3479 				dm_pflip_high_irq, c_irq_params);
3480 
3481 	}
3482 
3483 	/* HPD */
3484 	r = amdgpu_irq_add_id(adev, client_id,
3485 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3486 	if (r) {
3487 		DRM_ERROR("Failed to add hpd irq id!\n");
3488 		return r;
3489 	}
3490 
3491 	register_hpd_handlers(adev);
3492 
3493 	return 0;
3494 }
3495 
3496 #if defined(CONFIG_DRM_AMD_DC_DCN)
3497 /* Register IRQ sources and initialize IRQ callbacks */
3498 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3499 {
3500 	struct dc *dc = adev->dm.dc;
3501 	struct common_irq_params *c_irq_params;
3502 	struct dc_interrupt_params int_params = {0};
3503 	int r;
3504 	int i;
3505 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3506 	static const unsigned int vrtl_int_srcid[] = {
3507 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3508 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3509 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3510 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3511 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3512 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3513 	};
3514 #endif
3515 
3516 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3517 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3518 
3519 	/*
3520 	 * Actions of amdgpu_irq_add_id():
3521 	 * 1. Register a set() function with base driver.
3522 	 *    Base driver will call set() function to enable/disable an
3523 	 *    interrupt in DC hardware.
3524 	 * 2. Register amdgpu_dm_irq_handler().
3525 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3526 	 *    coming from DC hardware.
3527 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3528 	 *    for acknowledging and handling.
3529 	 */
3530 
3531 	/* Use VSTARTUP interrupt */
3532 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3533 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3534 			i++) {
3535 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3536 
3537 		if (r) {
3538 			DRM_ERROR("Failed to add crtc irq id!\n");
3539 			return r;
3540 		}
3541 
3542 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3543 		int_params.irq_source =
3544 			dc_interrupt_to_irq_source(dc, i, 0);
3545 
3546 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3547 
3548 		c_irq_params->adev = adev;
3549 		c_irq_params->irq_src = int_params.irq_source;
3550 
3551 		amdgpu_dm_irq_register_interrupt(
3552 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3553 	}
3554 
3555 	/* Use otg vertical line interrupt */
3556 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3557 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3558 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3559 				vrtl_int_srcid[i], &adev->vline0_irq);
3560 
3561 		if (r) {
3562 			DRM_ERROR("Failed to add vline0 irq id!\n");
3563 			return r;
3564 		}
3565 
3566 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3567 		int_params.irq_source =
3568 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3569 
3570 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3571 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3572 			break;
3573 		}
3574 
3575 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3576 					- DC_IRQ_SOURCE_DC1_VLINE0];
3577 
3578 		c_irq_params->adev = adev;
3579 		c_irq_params->irq_src = int_params.irq_source;
3580 
3581 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3582 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3583 	}
3584 #endif
3585 
3586 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3587 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3588 	 * to trigger at end of each vblank, regardless of state of the lock,
3589 	 * matching DCE behaviour.
3590 	 */
3591 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3592 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3593 	     i++) {
3594 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3595 
3596 		if (r) {
3597 			DRM_ERROR("Failed to add vupdate irq id!\n");
3598 			return r;
3599 		}
3600 
3601 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3602 		int_params.irq_source =
3603 			dc_interrupt_to_irq_source(dc, i, 0);
3604 
3605 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3606 
3607 		c_irq_params->adev = adev;
3608 		c_irq_params->irq_src = int_params.irq_source;
3609 
3610 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3611 				dm_vupdate_high_irq, c_irq_params);
3612 	}
3613 
3614 	/* Use GRPH_PFLIP interrupt */
3615 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3616 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3617 			i++) {
3618 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3619 		if (r) {
3620 			DRM_ERROR("Failed to add page flip irq id!\n");
3621 			return r;
3622 		}
3623 
3624 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3625 		int_params.irq_source =
3626 			dc_interrupt_to_irq_source(dc, i, 0);
3627 
3628 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3629 
3630 		c_irq_params->adev = adev;
3631 		c_irq_params->irq_src = int_params.irq_source;
3632 
3633 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3634 				dm_pflip_high_irq, c_irq_params);
3635 
3636 	}
3637 
3638 	/* HPD */
3639 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3640 			&adev->hpd_irq);
3641 	if (r) {
3642 		DRM_ERROR("Failed to add hpd irq id!\n");
3643 		return r;
3644 	}
3645 
3646 	register_hpd_handlers(adev);
3647 
3648 	return 0;
3649 }
3650 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3651 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3652 {
3653 	struct dc *dc = adev->dm.dc;
3654 	struct common_irq_params *c_irq_params;
3655 	struct dc_interrupt_params int_params = {0};
3656 	int r, i;
3657 
3658 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3659 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3660 
3661 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3662 			&adev->dmub_outbox_irq);
3663 	if (r) {
3664 		DRM_ERROR("Failed to add outbox irq id!\n");
3665 		return r;
3666 	}
3667 
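	/* Only wire up the outbox1 low-priority handler when a DMUB service is present. */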
3668 	if (dc->ctx->dmub_srv) {
3669 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3670 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3671 		int_params.irq_source =
3672 		dc_interrupt_to_irq_source(dc, i, 0);
3673 
3674 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3675 
3676 		c_irq_params->adev = adev;
3677 		c_irq_params->irq_src = int_params.irq_source;
3678 
3679 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3680 				dm_dmub_outbox1_low_irq, c_irq_params);
3681 	}
3682 
3683 	return 0;
3684 }
3685 #endif
3686 
3687 /*
3688  * Acquires the lock for the atomic state object and returns
3689  * the new atomic state.
3690  *
3691  * This should only be called during atomic check.
3692  */
3693 static int dm_atomic_get_state(struct drm_atomic_state *state,
3694 			       struct dm_atomic_state **dm_state)
3695 {
3696 	struct drm_device *dev = state->dev;
3697 	struct amdgpu_device *adev = drm_to_adev(dev);
3698 	struct amdgpu_display_manager *dm = &adev->dm;
3699 	struct drm_private_state *priv_state;
3700 
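	/* Nothing to do if the caller already holds the DM private state. */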
3701 	if (*dm_state)
3702 		return 0;
3703 
3704 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3705 	if (IS_ERR(priv_state))
3706 		return PTR_ERR(priv_state);
3707 
3708 	*dm_state = to_dm_atomic_state(priv_state);
3709 
3710 	return 0;
3711 }
3712 
3713 static struct dm_atomic_state *
3714 dm_atomic_get_new_state(struct drm_atomic_state *state)
3715 {
3716 	struct drm_device *dev = state->dev;
3717 	struct amdgpu_device *adev = drm_to_adev(dev);
3718 	struct amdgpu_display_manager *dm = &adev->dm;
3719 	struct drm_private_obj *obj;
3720 	struct drm_private_state *new_obj_state;
3721 	int i;
3722 
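	/* Walk the private objects in this atomic state and return the DM one, if present. */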
3723 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3724 		if (obj->funcs == dm->atomic_obj.funcs)
3725 			return to_dm_atomic_state(new_obj_state);
3726 	}
3727 
3728 	return NULL;
3729 }
3730 
3731 static struct drm_private_state *
3732 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3733 {
3734 	struct dm_atomic_state *old_state, *new_state;
3735 
3736 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3737 	if (!new_state)
3738 		return NULL;
3739 
3740 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3741 
3742 	old_state = to_dm_atomic_state(obj->state);
3743 
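	/* Carry the DC context forward by copying it from the current state. */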
3744 	if (old_state && old_state->context)
3745 		new_state->context = dc_copy_state(old_state->context);
3746 
3747 	if (!new_state->context) {
3748 		kfree(new_state);
3749 		return NULL;
3750 	}
3751 
3752 	return &new_state->base;
3753 }
3754 
3755 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3756 				    struct drm_private_state *state)
3757 {
3758 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3759 
3760 	if (dm_state && dm_state->context)
3761 		dc_release_state(dm_state->context);
3762 
3763 	kfree(dm_state);
3764 }
3765 
3766 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3767 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3768 	.atomic_destroy_state = dm_atomic_destroy_state,
3769 };
3770 
3771 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3772 {
3773 	struct dm_atomic_state *state;
3774 	int r;
3775 
3776 	adev->mode_info.mode_config_initialized = true;
3777 
3778 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3779 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3780 
3781 	adev_to_drm(adev)->mode_config.max_width = 16384;
3782 	adev_to_drm(adev)->mode_config.max_height = 16384;
3783 
3784 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3785 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3786 	/* indicates support for immediate flip */
3787 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3788 
3789 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3790 
3791 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3792 	if (!state)
3793 		return -ENOMEM;
3794 
3795 	state->context = dc_create_state(adev->dm.dc);
3796 	if (!state->context) {
3797 		kfree(state);
3798 		return -ENOMEM;
3799 	}
3800 
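	/* Seed the private atomic state with a copy of DC's current resource state. */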
3801 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3802 
3803 	drm_atomic_private_obj_init(adev_to_drm(adev),
3804 				    &adev->dm.atomic_obj,
3805 				    &state->base,
3806 				    &dm_atomic_state_funcs);
3807 
3808 	r = amdgpu_display_modeset_create_props(adev);
3809 	if (r) {
3810 		dc_release_state(state->context);
3811 		kfree(state);
3812 		return r;
3813 	}
3814 
3815 	r = amdgpu_dm_audio_init(adev);
3816 	if (r) {
3817 		dc_release_state(state->context);
3818 		kfree(state);
3819 		return r;
3820 	}
3821 
3822 	return 0;
3823 }
3824 
3825 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3826 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3827 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3828 
3829 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3830 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3831 
3832 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3833 					    int bl_idx)
3834 {
3835 #if defined(CONFIG_ACPI)
3836 	struct amdgpu_dm_backlight_caps caps;
3837 
3838 	memset(&caps, 0, sizeof(caps));
3839 
3840 	if (dm->backlight_caps[bl_idx].caps_valid)
3841 		return;
3842 
3843 	amdgpu_acpi_get_backlight_caps(&caps);
3844 	if (caps.caps_valid) {
3845 		dm->backlight_caps[bl_idx].caps_valid = true;
3846 		if (caps.aux_support)
3847 			return;
3848 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3849 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3850 	} else {
3851 		dm->backlight_caps[bl_idx].min_input_signal =
3852 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3853 		dm->backlight_caps[bl_idx].max_input_signal =
3854 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3855 	}
3856 #else
3857 	if (dm->backlight_caps[bl_idx].aux_support)
3858 		return;
3859 
3860 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3861 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3862 #endif
3863 }
3864 
3865 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3866 				unsigned *min, unsigned *max)
3867 {
3868 	if (!caps)
3869 		return 0;
3870 
3871 	if (caps->aux_support) {
3872 		// Firmware limits are in nits, DC API wants millinits.
3873 		*max = 1000 * caps->aux_max_input_signal;
3874 		*min = 1000 * caps->aux_min_input_signal;
3875 	} else {
3876 		// Firmware limits are 8-bit, PWM control is 16-bit.
3877 		*max = 0x101 * caps->max_input_signal;
3878 		*min = 0x101 * caps->min_input_signal;
3879 	}
3880 	return 1;
3881 }
3882 
3883 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3884 					uint32_t brightness)
3885 {
3886 	unsigned min, max;
3887 
3888 	if (!get_brightness_range(caps, &min, &max))
3889 		return brightness;
3890 
3891 	// Rescale 0..255 to min..max
3892 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3893 				       AMDGPU_MAX_BL_LEVEL);
3894 }
3895 
3896 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3897 				      uint32_t brightness)
3898 {
3899 	unsigned min, max;
3900 
3901 	if (!get_brightness_range(caps, &min, &max))
3902 		return brightness;
3903 
3904 	if (brightness < min)
3905 		return 0;
3906 	// Rescale min..max to 0..255
3907 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3908 				 max - min);
3909 }
3910 
3911 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3912 					 int bl_idx,
3913 					 u32 user_brightness)
3914 {
3915 	struct amdgpu_dm_backlight_caps caps;
3916 	struct dc_link *link;
3917 	u32 brightness;
3918 	bool rc;
3919 
3920 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3921 	caps = dm->backlight_caps[bl_idx];
3922 
3923 	dm->brightness[bl_idx] = user_brightness;
3924 	/* update scratch register */
3925 	if (bl_idx == 0)
3926 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3927 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3928 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3929 
3930 	/* Change brightness based on AUX property */
3931 	if (caps.aux_support) {
3932 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3933 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3934 		if (!rc)
3935 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3936 	} else {
3937 		rc = dc_link_set_backlight_level(link, brightness, 0);
3938 		if (!rc)
3939 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3940 	}
3941 
3942 	return rc ? 0 : 1;
3943 }
3944 
3945 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3946 {
3947 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3948 	int i;
3949 
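	/* Find which registered eDP backlight device this request targets; fall back to the first one if the index is out of range. */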
3950 	for (i = 0; i < dm->num_of_edps; i++) {
3951 		if (bd == dm->backlight_dev[i])
3952 			break;
3953 	}
3954 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3955 		i = 0;
3956 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3957 
3958 	return 0;
3959 }
3960 
3961 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3962 					 int bl_idx)
3963 {
3964 	struct amdgpu_dm_backlight_caps caps;
3965 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3966 
3967 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3968 	caps = dm->backlight_caps[bl_idx];
3969 
3970 	if (caps.aux_support) {
3971 		u32 avg, peak;
3972 		bool rc;
3973 
3974 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3975 		if (!rc)
3976 			return dm->brightness[bl_idx];
3977 		return convert_brightness_to_user(&caps, avg);
3978 	} else {
3979 		int ret = dc_link_get_backlight_level(link);
3980 
3981 		if (ret == DC_ERROR_UNEXPECTED)
3982 			return dm->brightness[bl_idx];
3983 		return convert_brightness_to_user(&caps, ret);
3984 	}
3985 }
3986 
3987 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3988 {
3989 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3990 	int i;
3991 
3992 	for (i = 0; i < dm->num_of_edps; i++) {
3993 		if (bd == dm->backlight_dev[i])
3994 			break;
3995 	}
3996 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3997 		i = 0;
3998 	return amdgpu_dm_backlight_get_level(dm, i);
3999 }
4000 
4001 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4002 	.options = BL_CORE_SUSPENDRESUME,
4003 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4004 	.update_status	= amdgpu_dm_backlight_update_status,
4005 };
4006 
4007 static void
4008 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4009 {
4010 	char bl_name[16];
4011 	struct backlight_properties props = { 0 };
4012 
4013 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4014 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4015 
4016 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4017 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4018 	props.type = BACKLIGHT_RAW;
4019 
4020 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4021 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4022 
4023 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4024 								       adev_to_drm(dm->adev)->dev,
4025 								       dm,
4026 								       &amdgpu_dm_backlight_ops,
4027 								       &props);
4028 
4029 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4030 		DRM_ERROR("DM: Backlight registration failed!\n");
4031 	else
4032 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4033 }
4034 #endif
4035 
4036 static int initialize_plane(struct amdgpu_display_manager *dm,
4037 			    struct amdgpu_mode_info *mode_info, int plane_id,
4038 			    enum drm_plane_type plane_type,
4039 			    const struct dc_plane_cap *plane_cap)
4040 {
4041 	struct drm_plane *plane;
4042 	unsigned long possible_crtcs;
4043 	int ret = 0;
4044 
4045 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4046 	if (!plane) {
4047 		DRM_ERROR("KMS: Failed to allocate plane\n");
4048 		return -ENOMEM;
4049 	}
4050 	plane->type = plane_type;
4051 
4052 	/*
4053 	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC to planes that will not be used as a primary plane
	 * for a CRTC, such as overlay or underlay planes.
4057 	 */
4058 	possible_crtcs = 1 << plane_id;
4059 	if (plane_id >= dm->dc->caps.max_streams)
4060 		possible_crtcs = 0xff;
4061 
4062 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4063 
4064 	if (ret) {
4065 		DRM_ERROR("KMS: Failed to initialize plane\n");
4066 		kfree(plane);
4067 		return ret;
4068 	}
4069 
4070 	if (mode_info)
4071 		mode_info->planes[plane_id] = plane;
4072 
4073 	return ret;
4074 }
4075 
4076 
4077 static void register_backlight_device(struct amdgpu_display_manager *dm,
4078 				      struct dc_link *link)
4079 {
4080 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4081 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4082 
4083 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4084 	    link->type != dc_connection_none) {
4085 		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
4089 		 */
4090 		if (!dm->backlight_dev[dm->num_of_edps])
4091 			amdgpu_dm_register_backlight_device(dm);
4092 
4093 		if (dm->backlight_dev[dm->num_of_edps]) {
4094 			dm->backlight_link[dm->num_of_edps] = link;
4095 			dm->num_of_edps++;
4096 		}
4097 	}
4098 #endif
4099 }
4100 
4101 
4102 /*
4103  * In this architecture, the association
4104  * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
4107  *
4108  * Returns 0 on success
4109  */
4110 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4111 {
4112 	struct amdgpu_display_manager *dm = &adev->dm;
4113 	int32_t i;
4114 	struct amdgpu_dm_connector *aconnector = NULL;
4115 	struct amdgpu_encoder *aencoder = NULL;
4116 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4117 	uint32_t link_cnt;
4118 	int32_t primary_planes;
4119 	enum dc_connection_type new_connection_type = dc_connection_none;
4120 	const struct dc_plane_cap *plane;
4121 	bool psr_feature_enabled = false;
4122 
4123 	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs in use */
4125 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4126 
4127 	link_cnt = dm->dc->caps.max_links;
4128 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4129 		DRM_ERROR("DM: Failed to initialize mode config\n");
4130 		return -EINVAL;
4131 	}
4132 
4133 	/* There is one primary plane per CRTC */
4134 	primary_planes = dm->dc->caps.max_streams;
4135 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4136 
4137 	/*
4138 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4139 	 * Order is reversed to match iteration order in atomic check.
4140 	 */
4141 	for (i = (primary_planes - 1); i >= 0; i--) {
4142 		plane = &dm->dc->caps.planes[i];
4143 
4144 		if (initialize_plane(dm, mode_info, i,
4145 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4146 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4147 			goto fail;
4148 		}
4149 	}
4150 
4151 	/*
4152 	 * Initialize overlay planes, index starting after primary planes.
4153 	 * These planes have a higher DRM index than the primary planes since
4154 	 * they should be considered as having a higher z-order.
4155 	 * Order is reversed to match iteration order in atomic check.
4156 	 *
4157 	 * Only support DCN for now, and only expose one so we don't encourage
4158 	 * userspace to use up all the pipes.
4159 	 */
4160 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4161 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4162 
4163 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4164 			continue;
4165 
4166 		if (!plane->blends_with_above || !plane->blends_with_below)
4167 			continue;
4168 
4169 		if (!plane->pixel_format_support.argb8888)
4170 			continue;
4171 
4172 		if (initialize_plane(dm, NULL, primary_planes + i,
4173 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4174 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4175 			goto fail;
4176 		}
4177 
4178 		/* Only create one overlay plane. */
4179 		break;
4180 	}
4181 
4182 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4183 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4184 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4185 			goto fail;
4186 		}
4187 
4188 #if defined(CONFIG_DRM_AMD_DC_DCN)
4189 	/* Use Outbox interrupt */
4190 	switch (adev->ip_versions[DCE_HWIP][0]) {
4191 	case IP_VERSION(3, 0, 0):
4192 	case IP_VERSION(3, 1, 2):
4193 	case IP_VERSION(3, 1, 3):
4194 	case IP_VERSION(2, 1, 0):
4195 		if (register_outbox_irq_handlers(dm->adev)) {
4196 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4197 			goto fail;
4198 		}
4199 		break;
4200 	default:
4201 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4202 			      adev->ip_versions[DCE_HWIP][0]);
4203 	}
4204 
4205 	/* Determine whether to enable PSR support by default. */
4206 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4207 		switch (adev->ip_versions[DCE_HWIP][0]) {
4208 		case IP_VERSION(3, 1, 2):
4209 		case IP_VERSION(3, 1, 3):
4210 			psr_feature_enabled = true;
4211 			break;
4212 		default:
4213 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4214 			break;
4215 		}
4216 	}
4217 #endif
4218 
4219 	/* loops over all connectors on the board */
4220 	for (i = 0; i < link_cnt; i++) {
4221 		struct dc_link *link = NULL;
4222 
4223 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4224 			DRM_ERROR(
4225 				"KMS: Cannot support more than %d display indexes\n",
4226 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4227 			continue;
4228 		}
4229 
4230 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4231 		if (!aconnector)
4232 			goto fail;
4233 
4234 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4235 		if (!aencoder)
4236 			goto fail;
4237 
4238 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4239 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4240 			goto fail;
4241 		}
4242 
4243 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4244 			DRM_ERROR("KMS: Failed to initialize connector\n");
4245 			goto fail;
4246 		}
4247 
4248 		link = dc_get_link_at_index(dm->dc, i);
4249 
4250 		if (!dc_link_detect_sink(link, &new_connection_type))
4251 			DRM_ERROR("KMS: Failed to detect connector\n");
4252 
4253 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4254 			emulated_link_detect(link);
4255 			amdgpu_dm_update_connector_after_detect(aconnector);
4256 
4257 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4258 			amdgpu_dm_update_connector_after_detect(aconnector);
4259 			register_backlight_device(dm, link);
4260 			if (dm->num_of_edps)
4261 				update_connector_ext_caps(aconnector);
4262 			if (psr_feature_enabled)
4263 				amdgpu_dm_set_psr_caps(link);
4264 		}
4265 
4266 
4267 	}
4268 
4269 	/*
4270 	 * Disable vblank IRQs aggressively for power-saving.
4271 	 *
4272 	 * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4273 	 * is also supported.
4274 	 */
4275 	adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4276 
4277 	/* Software is initialized. Now we can register interrupt handlers. */
4278 	switch (adev->asic_type) {
4279 #if defined(CONFIG_DRM_AMD_DC_SI)
4280 	case CHIP_TAHITI:
4281 	case CHIP_PITCAIRN:
4282 	case CHIP_VERDE:
4283 	case CHIP_OLAND:
4284 		if (dce60_register_irq_handlers(dm->adev)) {
4285 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4286 			goto fail;
4287 		}
4288 		break;
4289 #endif
4290 	case CHIP_BONAIRE:
4291 	case CHIP_HAWAII:
4292 	case CHIP_KAVERI:
4293 	case CHIP_KABINI:
4294 	case CHIP_MULLINS:
4295 	case CHIP_TONGA:
4296 	case CHIP_FIJI:
4297 	case CHIP_CARRIZO:
4298 	case CHIP_STONEY:
4299 	case CHIP_POLARIS11:
4300 	case CHIP_POLARIS10:
4301 	case CHIP_POLARIS12:
4302 	case CHIP_VEGAM:
4303 	case CHIP_VEGA10:
4304 	case CHIP_VEGA12:
4305 	case CHIP_VEGA20:
4306 		if (dce110_register_irq_handlers(dm->adev)) {
4307 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4308 			goto fail;
4309 		}
4310 		break;
4311 	default:
4312 #if defined(CONFIG_DRM_AMD_DC_DCN)
4313 		switch (adev->ip_versions[DCE_HWIP][0]) {
4314 		case IP_VERSION(1, 0, 0):
4315 		case IP_VERSION(1, 0, 1):
4316 		case IP_VERSION(2, 0, 2):
4317 		case IP_VERSION(2, 0, 3):
4318 		case IP_VERSION(2, 0, 0):
4319 		case IP_VERSION(2, 1, 0):
4320 		case IP_VERSION(3, 0, 0):
4321 		case IP_VERSION(3, 0, 2):
4322 		case IP_VERSION(3, 0, 3):
4323 		case IP_VERSION(3, 0, 1):
4324 		case IP_VERSION(3, 1, 2):
4325 		case IP_VERSION(3, 1, 3):
4326 			if (dcn10_register_irq_handlers(dm->adev)) {
4327 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4328 				goto fail;
4329 			}
4330 			break;
4331 		default:
4332 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4333 					adev->ip_versions[DCE_HWIP][0]);
4334 			goto fail;
4335 		}
4336 #endif
4337 		break;
4338 	}
4339 
4340 	return 0;
4341 fail:
4342 	kfree(aencoder);
4343 	kfree(aconnector);
4344 
4345 	return -EINVAL;
4346 }
4347 
4348 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4349 {
4350 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4352 }
4353 
4354 /******************************************************************************
4355  * amdgpu_display_funcs functions
4356  *****************************************************************************/
4357 
4358 /*
4359  * dm_bandwidth_update - program display watermarks
4360  *
4361  * @adev: amdgpu_device pointer
4362  *
4363  * Calculate and program the display watermarks and line buffer allocation.
4364  */
4365 static void dm_bandwidth_update(struct amdgpu_device *adev)
4366 {
4367 	/* TODO: implement later */
4368 }
4369 
4370 static const struct amdgpu_display_funcs dm_display_funcs = {
4371 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4372 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4373 	.backlight_set_level = NULL, /* never called for DC */
4374 	.backlight_get_level = NULL, /* never called for DC */
4375 	.hpd_sense = NULL,/* called unconditionally */
4376 	.hpd_set_polarity = NULL, /* called unconditionally */
4377 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4378 	.page_flip_get_scanoutpos =
4379 		dm_crtc_get_scanoutpos,/* called unconditionally */
4380 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4381 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4382 };
4383 
4384 #if defined(CONFIG_DEBUG_KERNEL_DC)
4385 
4386 static ssize_t s3_debug_store(struct device *device,
4387 			      struct device_attribute *attr,
4388 			      const char *buf,
4389 			      size_t count)
4390 {
4391 	int ret;
4392 	int s3_state;
4393 	struct drm_device *drm_dev = dev_get_drvdata(device);
4394 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4395 
4396 	ret = kstrtoint(buf, 0, &s3_state);
4397 
4398 	if (ret == 0) {
4399 		if (s3_state) {
4400 			dm_resume(adev);
4401 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4402 		} else
4403 			dm_suspend(adev);
4404 	}
4405 
4406 	return ret == 0 ? count : 0;
4407 }
4408 
4409 DEVICE_ATTR_WO(s3_debug);
4410 
4411 #endif
4412 
4413 static int dm_early_init(void *handle)
4414 {
4415 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4416 
4417 	switch (adev->asic_type) {
4418 #if defined(CONFIG_DRM_AMD_DC_SI)
4419 	case CHIP_TAHITI:
4420 	case CHIP_PITCAIRN:
4421 	case CHIP_VERDE:
4422 		adev->mode_info.num_crtc = 6;
4423 		adev->mode_info.num_hpd = 6;
4424 		adev->mode_info.num_dig = 6;
4425 		break;
4426 	case CHIP_OLAND:
4427 		adev->mode_info.num_crtc = 2;
4428 		adev->mode_info.num_hpd = 2;
4429 		adev->mode_info.num_dig = 2;
4430 		break;
4431 #endif
4432 	case CHIP_BONAIRE:
4433 	case CHIP_HAWAII:
4434 		adev->mode_info.num_crtc = 6;
4435 		adev->mode_info.num_hpd = 6;
4436 		adev->mode_info.num_dig = 6;
4437 		break;
4438 	case CHIP_KAVERI:
4439 		adev->mode_info.num_crtc = 4;
4440 		adev->mode_info.num_hpd = 6;
4441 		adev->mode_info.num_dig = 7;
4442 		break;
4443 	case CHIP_KABINI:
4444 	case CHIP_MULLINS:
4445 		adev->mode_info.num_crtc = 2;
4446 		adev->mode_info.num_hpd = 6;
4447 		adev->mode_info.num_dig = 6;
4448 		break;
4449 	case CHIP_FIJI:
4450 	case CHIP_TONGA:
4451 		adev->mode_info.num_crtc = 6;
4452 		adev->mode_info.num_hpd = 6;
4453 		adev->mode_info.num_dig = 7;
4454 		break;
4455 	case CHIP_CARRIZO:
4456 		adev->mode_info.num_crtc = 3;
4457 		adev->mode_info.num_hpd = 6;
4458 		adev->mode_info.num_dig = 9;
4459 		break;
4460 	case CHIP_STONEY:
4461 		adev->mode_info.num_crtc = 2;
4462 		adev->mode_info.num_hpd = 6;
4463 		adev->mode_info.num_dig = 9;
4464 		break;
4465 	case CHIP_POLARIS11:
4466 	case CHIP_POLARIS12:
4467 		adev->mode_info.num_crtc = 5;
4468 		adev->mode_info.num_hpd = 5;
4469 		adev->mode_info.num_dig = 5;
4470 		break;
4471 	case CHIP_POLARIS10:
4472 	case CHIP_VEGAM:
4473 		adev->mode_info.num_crtc = 6;
4474 		adev->mode_info.num_hpd = 6;
4475 		adev->mode_info.num_dig = 6;
4476 		break;
4477 	case CHIP_VEGA10:
4478 	case CHIP_VEGA12:
4479 	case CHIP_VEGA20:
4480 		adev->mode_info.num_crtc = 6;
4481 		adev->mode_info.num_hpd = 6;
4482 		adev->mode_info.num_dig = 6;
4483 		break;
4484 	default:
4485 #if defined(CONFIG_DRM_AMD_DC_DCN)
4486 		switch (adev->ip_versions[DCE_HWIP][0]) {
4487 		case IP_VERSION(2, 0, 2):
4488 		case IP_VERSION(3, 0, 0):
4489 			adev->mode_info.num_crtc = 6;
4490 			adev->mode_info.num_hpd = 6;
4491 			adev->mode_info.num_dig = 6;
4492 			break;
4493 		case IP_VERSION(2, 0, 0):
4494 		case IP_VERSION(3, 0, 2):
4495 			adev->mode_info.num_crtc = 5;
4496 			adev->mode_info.num_hpd = 5;
4497 			adev->mode_info.num_dig = 5;
4498 			break;
4499 		case IP_VERSION(2, 0, 3):
4500 		case IP_VERSION(3, 0, 3):
4501 			adev->mode_info.num_crtc = 2;
4502 			adev->mode_info.num_hpd = 2;
4503 			adev->mode_info.num_dig = 2;
4504 			break;
4505 		case IP_VERSION(1, 0, 0):
4506 		case IP_VERSION(1, 0, 1):
4507 		case IP_VERSION(3, 0, 1):
4508 		case IP_VERSION(2, 1, 0):
4509 		case IP_VERSION(3, 1, 2):
4510 		case IP_VERSION(3, 1, 3):
4511 			adev->mode_info.num_crtc = 4;
4512 			adev->mode_info.num_hpd = 4;
4513 			adev->mode_info.num_dig = 4;
4514 			break;
4515 		default:
4516 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4517 					adev->ip_versions[DCE_HWIP][0]);
4518 			return -EINVAL;
4519 		}
4520 #endif
4521 		break;
4522 	}
4523 
4524 	amdgpu_dm_set_irq_funcs(adev);
4525 
4526 	if (adev->mode_info.funcs == NULL)
4527 		adev->mode_info.funcs = &dm_display_funcs;
4528 
4529 	/*
4530 	 * Note: Do NOT change adev->audio_endpt_rreg and
4531 	 * adev->audio_endpt_wreg because they are initialised in
4532 	 * amdgpu_device_init()
4533 	 */
4534 #if defined(CONFIG_DEBUG_KERNEL_DC)
4535 	device_create_file(
4536 		adev_to_drm(adev)->dev,
4537 		&dev_attr_s3_debug);
4538 #endif
4539 
4540 	return 0;
4541 }
4542 
4543 static bool modeset_required(struct drm_crtc_state *crtc_state,
4544 			     struct dc_stream_state *new_stream,
4545 			     struct dc_stream_state *old_stream)
4546 {
4547 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4548 }
4549 
4550 static bool modereset_required(struct drm_crtc_state *crtc_state)
4551 {
4552 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4553 }
4554 
4555 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4556 {
4557 	drm_encoder_cleanup(encoder);
4558 	kfree(encoder);
4559 }
4560 
4561 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4562 	.destroy = amdgpu_dm_encoder_destroy,
4563 };
4564 
4565 
4566 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4567 					 struct drm_framebuffer *fb,
4568 					 int *min_downscale, int *max_upscale)
4569 {
4570 	struct amdgpu_device *adev = drm_to_adev(dev);
4571 	struct dc *dc = adev->dm.dc;
4572 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4573 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4574 
4575 	switch (fb->format->format) {
4576 	case DRM_FORMAT_P010:
4577 	case DRM_FORMAT_NV12:
4578 	case DRM_FORMAT_NV21:
4579 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4580 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4581 		break;
4582 
4583 	case DRM_FORMAT_XRGB16161616F:
4584 	case DRM_FORMAT_ARGB16161616F:
4585 	case DRM_FORMAT_XBGR16161616F:
4586 	case DRM_FORMAT_ABGR16161616F:
4587 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4588 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4589 		break;
4590 
4591 	default:
4592 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4593 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4594 		break;
4595 	}
4596 
4597 	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4599 	 * scaling factor of 1.0 == 1000 units.
4600 	 */
4601 	if (*max_upscale == 1)
4602 		*max_upscale = 1000;
4603 
4604 	if (*min_downscale == 1)
4605 		*min_downscale = 1000;
4606 }
4607 
4608 
4609 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4610 				const struct drm_plane_state *state,
4611 				struct dc_scaling_info *scaling_info)
4612 {
4613 	int scale_w, scale_h, min_downscale, max_upscale;
4614 
4615 	memset(scaling_info, 0, sizeof(*scaling_info));
4616 
4617 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4618 	scaling_info->src_rect.x = state->src_x >> 16;
4619 	scaling_info->src_rect.y = state->src_y >> 16;
4620 
4621 	/*
4622 	 * For reasons we don't (yet) fully understand a non-zero
4623 	 * src_y coordinate into an NV12 buffer can cause a
4624 	 * system hang on DCN1x.
4625 	 * To avoid hangs (and maybe be overly cautious)
4626 	 * let's reject both non-zero src_x and src_y.
4627 	 *
4628 	 * We currently know of only one use-case to reproduce a
4629 	 * scenario with non-zero src_x and src_y for NV12, which
4630 	 * is to gesture the YouTube Android app into full screen
4631 	 * on ChromeOS.
4632 	 */
4633 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4634 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4635 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4636 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4637 		return -EINVAL;
4638 
4639 	scaling_info->src_rect.width = state->src_w >> 16;
4640 	if (scaling_info->src_rect.width == 0)
4641 		return -EINVAL;
4642 
4643 	scaling_info->src_rect.height = state->src_h >> 16;
4644 	if (scaling_info->src_rect.height == 0)
4645 		return -EINVAL;
4646 
4647 	scaling_info->dst_rect.x = state->crtc_x;
4648 	scaling_info->dst_rect.y = state->crtc_y;
4649 
4650 	if (state->crtc_w == 0)
4651 		return -EINVAL;
4652 
4653 	scaling_info->dst_rect.width = state->crtc_w;
4654 
4655 	if (state->crtc_h == 0)
4656 		return -EINVAL;
4657 
4658 	scaling_info->dst_rect.height = state->crtc_h;
4659 
4660 	/* DRM doesn't specify clipping on destination output. */
4661 	scaling_info->clip_rect = scaling_info->dst_rect;
4662 
4663 	/* Validate scaling per-format with DC plane caps */
4664 	if (state->plane && state->plane->dev && state->fb) {
4665 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4666 					     &min_downscale, &max_upscale);
4667 	} else {
4668 		min_downscale = 250;
4669 		max_upscale = 16000;
4670 	}
4671 
4672 	scale_w = scaling_info->dst_rect.width * 1000 /
4673 		  scaling_info->src_rect.width;
4674 
4675 	if (scale_w < min_downscale || scale_w > max_upscale)
4676 		return -EINVAL;
4677 
4678 	scale_h = scaling_info->dst_rect.height * 1000 /
4679 		  scaling_info->src_rect.height;
4680 
4681 	if (scale_h < min_downscale || scale_h > max_upscale)
4682 		return -EINVAL;
4683 
4684 	/*
4685 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4686 	 * assume reasonable defaults based on the format.
4687 	 */
4688 
4689 	return 0;
4690 }
4691 
4692 static void
4693 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4694 				 uint64_t tiling_flags)
4695 {
4696 	/* Fill GFX8 params */
4697 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4698 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4699 
4700 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4701 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4702 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4703 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4704 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4705 
4706 		/* XXX fix me for VI */
4707 		tiling_info->gfx8.num_banks = num_banks;
4708 		tiling_info->gfx8.array_mode =
4709 				DC_ARRAY_2D_TILED_THIN1;
4710 		tiling_info->gfx8.tile_split = tile_split;
4711 		tiling_info->gfx8.bank_width = bankw;
4712 		tiling_info->gfx8.bank_height = bankh;
4713 		tiling_info->gfx8.tile_aspect = mtaspect;
4714 		tiling_info->gfx8.tile_mode =
4715 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4716 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4717 			== DC_ARRAY_1D_TILED_THIN1) {
4718 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4719 	}
4720 
4721 	tiling_info->gfx8.pipe_config =
4722 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4723 }
4724 
4725 static void
4726 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4727 				  union dc_tiling_info *tiling_info)
4728 {
4729 	tiling_info->gfx9.num_pipes =
4730 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4731 	tiling_info->gfx9.num_banks =
4732 		adev->gfx.config.gb_addr_config_fields.num_banks;
4733 	tiling_info->gfx9.pipe_interleave =
4734 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4735 	tiling_info->gfx9.num_shader_engines =
4736 		adev->gfx.config.gb_addr_config_fields.num_se;
4737 	tiling_info->gfx9.max_compressed_frags =
4738 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4739 	tiling_info->gfx9.num_rb_per_se =
4740 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4741 	tiling_info->gfx9.shaderEnable = 1;
4742 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4743 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4744 }
4745 
4746 static int
4747 validate_dcc(struct amdgpu_device *adev,
4748 	     const enum surface_pixel_format format,
4749 	     const enum dc_rotation_angle rotation,
4750 	     const union dc_tiling_info *tiling_info,
4751 	     const struct dc_plane_dcc_param *dcc,
4752 	     const struct dc_plane_address *address,
4753 	     const struct plane_size *plane_size)
4754 {
4755 	struct dc *dc = adev->dm.dc;
4756 	struct dc_dcc_surface_param input;
4757 	struct dc_surface_dcc_cap output;
4758 
4759 	memset(&input, 0, sizeof(input));
4760 	memset(&output, 0, sizeof(output));
4761 
4762 	if (!dcc->enable)
4763 		return 0;
4764 
4765 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4766 	    !dc->cap_funcs.get_dcc_compression_cap)
4767 		return -EINVAL;
4768 
4769 	input.format = format;
4770 	input.surface_size.width = plane_size->surface_size.width;
4771 	input.surface_size.height = plane_size->surface_size.height;
4772 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4773 
4774 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4775 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4776 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4777 		input.scan = SCAN_DIRECTION_VERTICAL;
4778 
4779 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4780 		return -EINVAL;
4781 
4782 	if (!output.capable)
4783 		return -EINVAL;
4784 
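	/* Reject DCC surfaces that lack independent 64B blocks when the DCC capability query reports they are required. */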
4785 	if (dcc->independent_64b_blks == 0 &&
4786 	    output.grph.rgb.independent_64b_blks != 0)
4787 		return -EINVAL;
4788 
4789 	return 0;
4790 }
4791 
4792 static bool
4793 modifier_has_dcc(uint64_t modifier)
4794 {
4795 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4796 }
4797 
4798 static unsigned
4799 modifier_gfx9_swizzle_mode(uint64_t modifier)
4800 {
4801 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4802 		return 0;
4803 
4804 	return AMD_FMT_MOD_GET(TILE, modifier);
4805 }
4806 
4807 static const struct drm_format_info *
4808 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4809 {
4810 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4811 }
4812 
4813 static void
4814 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4815 				    union dc_tiling_info *tiling_info,
4816 				    uint64_t modifier)
4817 {
4818 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4819 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4820 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4821 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4822 
4823 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4824 
4825 	if (!IS_AMD_FMT_MOD(modifier))
4826 		return;
4827 
4828 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4829 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4830 
4831 	if (adev->family >= AMDGPU_FAMILY_NV) {
4832 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4833 	} else {
4834 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4835 
4836 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4837 	}
4838 }
4839 
4840 enum dm_micro_swizzle {
4841 	MICRO_SWIZZLE_Z = 0,
4842 	MICRO_SWIZZLE_S = 1,
4843 	MICRO_SWIZZLE_D = 2,
4844 	MICRO_SWIZZLE_R = 3
4845 };
4846 
4847 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4848 					  uint32_t format,
4849 					  uint64_t modifier)
4850 {
4851 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4852 	const struct drm_format_info *info = drm_format_info(format);
4853 	int i;
4854 
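	/* The low two bits of the GFX9+ swizzle mode select the micro-tile ordering (Z/S/D/R). */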
4855 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4856 
4857 	if (!info)
4858 		return false;
4859 
4860 	/*
4861 	 * We always have to allow these modifiers:
4862 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4863 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4864 	 */
4865 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4866 	    modifier == DRM_FORMAT_MOD_INVALID) {
4867 		return true;
4868 	}
4869 
4870 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4871 	for (i = 0; i < plane->modifier_count; i++) {
4872 		if (modifier == plane->modifiers[i])
4873 			break;
4874 	}
4875 	if (i == plane->modifier_count)
4876 		return false;
4877 
4878 	/*
4879 	 * For D swizzle the canonical modifier depends on the bpp, so check
4880 	 * it here.
4881 	 */
4882 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4883 	    adev->family >= AMDGPU_FAMILY_NV) {
4884 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4885 			return false;
4886 	}
4887 
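	/* On DCN (Raven and newer), the D micro-tile ordering is only usable for 64bpp scanout. */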
4888 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4889 	    info->cpp[0] < 8)
4890 		return false;
4891 
4892 	if (modifier_has_dcc(modifier)) {
4893 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4894 		if (info->cpp[0] != 4)
4895 			return false;
4896 		/* We support multi-planar formats, but not when combined with
4897 		 * additional DCC metadata planes. */
4898 		if (info->num_planes > 1)
4899 			return false;
4900 	}
4901 
4902 	return true;
4903 }
4904 
4905 static void
4906 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4907 {
4908 	if (!*mods)
4909 		return;
4910 
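	/* Grow the modifier array when it is full, doubling its capacity. */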
4911 	if (*cap - *size < 1) {
4912 		uint64_t new_cap = *cap * 2;
4913 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4914 
4915 		if (!new_mods) {
4916 			kfree(*mods);
4917 			*mods = NULL;
4918 			return;
4919 		}
4920 
4921 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4922 		kfree(*mods);
4923 		*mods = new_mods;
4924 		*cap = new_cap;
4925 	}
4926 
4927 	(*mods)[*size] = mod;
4928 	*size += 1;
4929 }
4930 
4931 static void
4932 add_gfx9_modifiers(const struct amdgpu_device *adev,
4933 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4934 {
4935 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4936 	int pipe_xor_bits = min(8, pipes +
4937 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4938 	int bank_xor_bits = min(8 - pipe_xor_bits,
4939 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4940 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4941 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4942 
4943 
4944 	if (adev->family == AMDGPU_FAMILY_RV) {
4945 		/* Raven2 and later */
4946 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4947 
4948 		/*
4949 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4950 		 * doesn't support _D on DCN
4951 		 */
4952 
4953 		if (has_constant_encode) {
4954 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4955 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4956 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4957 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4958 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4959 				    AMD_FMT_MOD_SET(DCC, 1) |
4960 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4961 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4962 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4963 		}
4964 
4965 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4966 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4967 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4968 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4969 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4970 			    AMD_FMT_MOD_SET(DCC, 1) |
4971 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4972 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4973 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4974 
4975 		if (has_constant_encode) {
4976 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4977 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4978 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4979 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4980 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4981 				    AMD_FMT_MOD_SET(DCC, 1) |
4982 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4983 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4984 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4985 
4986 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4987 				    AMD_FMT_MOD_SET(RB, rb) |
4988 				    AMD_FMT_MOD_SET(PIPE, pipes));
4989 		}
4990 
4991 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4992 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4993 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4994 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4995 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4996 			    AMD_FMT_MOD_SET(DCC, 1) |
4997 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4998 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4999 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5000 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5001 			    AMD_FMT_MOD_SET(RB, rb) |
5002 			    AMD_FMT_MOD_SET(PIPE, pipes));
5003 	}
5004 
5005 	/*
5006 	 * Only supported for 64bpp on Raven, will be filtered on format in
5007 	 * dm_plane_format_mod_supported.
5008 	 */
5009 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5010 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5011 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5012 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5013 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5014 
5015 	if (adev->family == AMDGPU_FAMILY_RV) {
5016 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5017 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5018 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5019 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5020 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5021 	}
5022 
5023 	/*
5024 	 * Only supported for 64bpp on Raven, will be filtered on format in
5025 	 * dm_plane_format_mod_supported.
5026 	 */
5027 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5028 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5029 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5030 
5031 	if (adev->family == AMDGPU_FAMILY_RV) {
5032 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5033 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5034 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5035 	}
5036 }
5037 
5038 static void
5039 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5040 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5041 {
5042 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5043 
5044 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5045 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5046 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5047 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5048 		    AMD_FMT_MOD_SET(DCC, 1) |
5049 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5050 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5051 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5052 
5053 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5054 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5055 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5056 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5057 		    AMD_FMT_MOD_SET(DCC, 1) |
5058 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5059 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5060 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5061 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5062 
5063 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5064 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5065 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5066 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5067 
5068 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5069 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5070 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5071 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5072 
5073 
5074 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5075 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5076 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5077 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5078 
5079 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5080 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5081 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5082 }
5083 
5084 static void
5085 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5086 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5087 {
5088 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5089 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5090 
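	/* The modifier fields carry log2 of the pipe and packer counts. */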
5091 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5092 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5093 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5094 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5095 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5096 		    AMD_FMT_MOD_SET(DCC, 1) |
5097 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5098 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5099 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5100 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5101 
5102 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5103 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5104 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5105 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5106 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5107 		    AMD_FMT_MOD_SET(DCC, 1) |
5108 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5109 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5110 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5111 
5112 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5113 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5114 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5115 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5116 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5117 		    AMD_FMT_MOD_SET(DCC, 1) |
5118 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5119 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5120 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5121 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5122 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5123 
5124 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5126 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5127 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5128 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5129 		    AMD_FMT_MOD_SET(DCC, 1) |
5130 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5131 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5132 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5133 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5134 
5135 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5136 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5137 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5138 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5139 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5140 
5141 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5142 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5143 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5144 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5145 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5146 
5147 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5148 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5149 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5150 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5151 
5152 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5153 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5154 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5155 }
5156 
5157 static int
5158 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5159 {
	uint64_t size = 0, capacity = 128;

	*mods = NULL;
5162 
5163 	/* We have not hooked up any pre-GFX9 modifiers. */
5164 	if (adev->family < AMDGPU_FAMILY_AI)
5165 		return 0;
5166 
5167 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5168 
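	/* Cursor planes only take linear surfaces; INVALID terminates the list. */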
5169 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5170 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5171 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5172 		return *mods ? 0 : -ENOMEM;
5173 	}
5174 
5175 	switch (adev->family) {
5176 	case AMDGPU_FAMILY_AI:
5177 	case AMDGPU_FAMILY_RV:
5178 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5179 		break;
5180 	case AMDGPU_FAMILY_NV:
5181 	case AMDGPU_FAMILY_VGH:
5182 	case AMDGPU_FAMILY_YC:
5183 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5184 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5185 		else
5186 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5187 		break;
5188 	}
5189 
5190 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5191 
5192 	/* INVALID marks the end of the list. */
5193 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5194 
5195 	if (!*mods)
5196 		return -ENOMEM;
5197 
5198 	return 0;
5199 }
5200 
5201 static int
5202 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5203 					  const struct amdgpu_framebuffer *afb,
5204 					  const enum surface_pixel_format format,
5205 					  const enum dc_rotation_angle rotation,
5206 					  const struct plane_size *plane_size,
5207 					  union dc_tiling_info *tiling_info,
5208 					  struct dc_plane_dcc_param *dcc,
5209 					  struct dc_plane_address *address,
5210 					  const bool force_disable_dcc)
5211 {
5212 	const uint64_t modifier = afb->base.modifier;
5213 	int ret = 0;
5214 
5215 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5216 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5217 
5218 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
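		/* DCC metadata is carried in plane 1 of the framebuffer. */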
5219 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5220 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5221 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5222 
5223 		dcc->enable = 1;
5224 		dcc->meta_pitch = afb->base.pitches[1];
5225 		dcc->independent_64b_blks = independent_64b_blks;
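		/*
		 * Map the modifier's independent 64B/128B flags onto the HUBP
		 * independent block setting; RB+ (GFX10.3 and later)
		 * distinguishes the combined 64B + 128B case.
		 */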
5226 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5227 			if (independent_64b_blks && independent_128b_blks)
5228 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5229 			else if (independent_128b_blks)
5230 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5231 			else if (independent_64b_blks && !independent_128b_blks)
5232 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5233 			else
5234 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5235 		} else {
5236 			if (independent_64b_blks)
5237 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5238 			else
5239 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5240 		}
5241 
5242 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5243 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5244 	}
5245 
5246 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5247 	if (ret)
5248 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5249 
5250 	return ret;
5251 }
5252 
5253 static int
5254 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5255 			     const struct amdgpu_framebuffer *afb,
5256 			     const enum surface_pixel_format format,
5257 			     const enum dc_rotation_angle rotation,
5258 			     const uint64_t tiling_flags,
5259 			     union dc_tiling_info *tiling_info,
5260 			     struct plane_size *plane_size,
5261 			     struct dc_plane_dcc_param *dcc,
5262 			     struct dc_plane_address *address,
5263 			     bool tmz_surface,
5264 			     bool force_disable_dcc)
5265 {
5266 	const struct drm_framebuffer *fb = &afb->base;
5267 	int ret;
5268 
5269 	memset(tiling_info, 0, sizeof(*tiling_info));
5270 	memset(plane_size, 0, sizeof(*plane_size));
5271 	memset(dcc, 0, sizeof(*dcc));
5272 	memset(address, 0, sizeof(*address));
5273 
5274 	address->tmz_surface = tmz_surface;
5275 
5276 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5277 		uint64_t addr = afb->address + fb->offsets[0];
5278 
5279 		plane_size->surface_size.x = 0;
5280 		plane_size->surface_size.y = 0;
5281 		plane_size->surface_size.width = fb->width;
5282 		plane_size->surface_size.height = fb->height;
5283 		plane_size->surface_pitch =
5284 			fb->pitches[0] / fb->format->cpp[0];
5285 
5286 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5287 		address->grph.addr.low_part = lower_32_bits(addr);
5288 		address->grph.addr.high_part = upper_32_bits(addr);
5289 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5290 		uint64_t luma_addr = afb->address + fb->offsets[0];
5291 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5292 
5293 		plane_size->surface_size.x = 0;
5294 		plane_size->surface_size.y = 0;
5295 		plane_size->surface_size.width = fb->width;
5296 		plane_size->surface_size.height = fb->height;
5297 		plane_size->surface_pitch =
5298 			fb->pitches[0] / fb->format->cpp[0];
5299 
5300 		plane_size->chroma_size.x = 0;
5301 		plane_size->chroma_size.y = 0;
5302 		/* TODO: set these based on surface format */
5303 		plane_size->chroma_size.width = fb->width / 2;
5304 		plane_size->chroma_size.height = fb->height / 2;
5305 
5306 		plane_size->chroma_pitch =
5307 			fb->pitches[1] / fb->format->cpp[1];
5308 
5309 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5310 		address->video_progressive.luma_addr.low_part =
5311 			lower_32_bits(luma_addr);
5312 		address->video_progressive.luma_addr.high_part =
5313 			upper_32_bits(luma_addr);
5314 		address->video_progressive.chroma_addr.low_part =
5315 			lower_32_bits(chroma_addr);
5316 		address->video_progressive.chroma_addr.high_part =
5317 			upper_32_bits(chroma_addr);
5318 	}
5319 
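	/*
	 * GFX9 and newer derive tiling and DCC parameters from the framebuffer
	 * modifier; older ASICs still use the legacy tiling flags.
	 */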
5320 	if (adev->family >= AMDGPU_FAMILY_AI) {
5321 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5322 								rotation, plane_size,
5323 								tiling_info, dcc,
5324 								address,
5325 								force_disable_dcc);
5326 		if (ret)
5327 			return ret;
5328 	} else {
5329 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5330 	}
5331 
5332 	return 0;
5333 }
5334 
5335 static void
5336 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5337 			       bool *per_pixel_alpha, bool *global_alpha,
5338 			       int *global_alpha_value)
5339 {
5340 	*per_pixel_alpha = false;
5341 	*global_alpha = false;
5342 	*global_alpha_value = 0xff;
5343 
5344 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5345 		return;
5346 
5347 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5348 		static const uint32_t alpha_formats[] = {
5349 			DRM_FORMAT_ARGB8888,
5350 			DRM_FORMAT_RGBA8888,
5351 			DRM_FORMAT_ABGR8888,
5352 		};
5353 		uint32_t format = plane_state->fb->format->format;
5354 		unsigned int i;
5355 
5356 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5357 			if (format == alpha_formats[i]) {
5358 				*per_pixel_alpha = true;
5359 				break;
5360 			}
5361 		}
5362 	}
5363 
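	/* DRM plane alpha is 16-bit (0xffff == opaque); DC takes an 8-bit value. */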
5364 	if (plane_state->alpha < 0xffff) {
5365 		*global_alpha = true;
5366 		*global_alpha_value = plane_state->alpha >> 8;
5367 	}
5368 }
5369 
5370 static int
5371 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5372 			    const enum surface_pixel_format format,
5373 			    enum dc_color_space *color_space)
5374 {
5375 	bool full_range;
5376 
5377 	*color_space = COLOR_SPACE_SRGB;
5378 
5379 	/* DRM color properties only affect non-RGB formats. */
5380 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5381 		return 0;
5382 
5383 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5384 
5385 	switch (plane_state->color_encoding) {
5386 	case DRM_COLOR_YCBCR_BT601:
5387 		if (full_range)
5388 			*color_space = COLOR_SPACE_YCBCR601;
5389 		else
5390 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5391 		break;
5392 
5393 	case DRM_COLOR_YCBCR_BT709:
5394 		if (full_range)
5395 			*color_space = COLOR_SPACE_YCBCR709;
5396 		else
5397 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5398 		break;
5399 
5400 	case DRM_COLOR_YCBCR_BT2020:
5401 		if (full_range)
5402 			*color_space = COLOR_SPACE_2020_YCBCR;
5403 		else
5404 			return -EINVAL;
5405 		break;
5406 
5407 	default:
5408 		return -EINVAL;
5409 	}
5410 
5411 	return 0;
5412 }
5413 
5414 static int
5415 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5416 			    const struct drm_plane_state *plane_state,
5417 			    const uint64_t tiling_flags,
5418 			    struct dc_plane_info *plane_info,
5419 			    struct dc_plane_address *address,
5420 			    bool tmz_surface,
5421 			    bool force_disable_dcc)
5422 {
5423 	const struct drm_framebuffer *fb = plane_state->fb;
5424 	const struct amdgpu_framebuffer *afb =
5425 		to_amdgpu_framebuffer(plane_state->fb);
5426 	int ret;
5427 
5428 	memset(plane_info, 0, sizeof(*plane_info));
5429 
5430 	switch (fb->format->format) {
5431 	case DRM_FORMAT_C8:
5432 		plane_info->format =
5433 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5434 		break;
5435 	case DRM_FORMAT_RGB565:
5436 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5437 		break;
5438 	case DRM_FORMAT_XRGB8888:
5439 	case DRM_FORMAT_ARGB8888:
5440 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5441 		break;
5442 	case DRM_FORMAT_XRGB2101010:
5443 	case DRM_FORMAT_ARGB2101010:
5444 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5445 		break;
5446 	case DRM_FORMAT_XBGR2101010:
5447 	case DRM_FORMAT_ABGR2101010:
5448 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5449 		break;
5450 	case DRM_FORMAT_XBGR8888:
5451 	case DRM_FORMAT_ABGR8888:
5452 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5453 		break;
5454 	case DRM_FORMAT_NV21:
5455 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5456 		break;
5457 	case DRM_FORMAT_NV12:
5458 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5459 		break;
5460 	case DRM_FORMAT_P010:
5461 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5462 		break;
5463 	case DRM_FORMAT_XRGB16161616F:
5464 	case DRM_FORMAT_ARGB16161616F:
5465 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5466 		break;
5467 	case DRM_FORMAT_XBGR16161616F:
5468 	case DRM_FORMAT_ABGR16161616F:
5469 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5470 		break;
5471 	case DRM_FORMAT_XRGB16161616:
5472 	case DRM_FORMAT_ARGB16161616:
5473 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5474 		break;
5475 	case DRM_FORMAT_XBGR16161616:
5476 	case DRM_FORMAT_ABGR16161616:
5477 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5478 		break;
5479 	default:
5480 		DRM_ERROR(
5481 			"Unsupported screen format %p4cc\n",
5482 			&fb->format->format);
5483 		return -EINVAL;
5484 	}
5485 
5486 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5487 	case DRM_MODE_ROTATE_0:
5488 		plane_info->rotation = ROTATION_ANGLE_0;
5489 		break;
5490 	case DRM_MODE_ROTATE_90:
5491 		plane_info->rotation = ROTATION_ANGLE_90;
5492 		break;
5493 	case DRM_MODE_ROTATE_180:
5494 		plane_info->rotation = ROTATION_ANGLE_180;
5495 		break;
5496 	case DRM_MODE_ROTATE_270:
5497 		plane_info->rotation = ROTATION_ANGLE_270;
5498 		break;
5499 	default:
5500 		plane_info->rotation = ROTATION_ANGLE_0;
5501 		break;
5502 	}
5503 
5504 	plane_info->visible = true;
5505 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5506 
5507 	plane_info->layer_index = 0;
5508 
5509 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5510 					  &plane_info->color_space);
5511 	if (ret)
5512 		return ret;
5513 
5514 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5515 					   plane_info->rotation, tiling_flags,
5516 					   &plane_info->tiling_info,
5517 					   &plane_info->plane_size,
5518 					   &plane_info->dcc, address, tmz_surface,
5519 					   force_disable_dcc);
5520 	if (ret)
5521 		return ret;
5522 
5523 	fill_blending_from_plane_state(
5524 		plane_state, &plane_info->per_pixel_alpha,
5525 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5526 
5527 	return 0;
5528 }
5529 
5530 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5531 				    struct dc_plane_state *dc_plane_state,
5532 				    struct drm_plane_state *plane_state,
5533 				    struct drm_crtc_state *crtc_state)
5534 {
5535 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5536 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5537 	struct dc_scaling_info scaling_info;
5538 	struct dc_plane_info plane_info;
5539 	int ret;
5540 	bool force_disable_dcc = false;
5541 
5542 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5543 	if (ret)
5544 		return ret;
5545 
5546 	dc_plane_state->src_rect = scaling_info.src_rect;
5547 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5548 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5549 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5550 
5551 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5552 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5553 					  afb->tiling_flags,
5554 					  &plane_info,
5555 					  &dc_plane_state->address,
5556 					  afb->tmz_surface,
5557 					  force_disable_dcc);
5558 	if (ret)
5559 		return ret;
5560 
5561 	dc_plane_state->format = plane_info.format;
5562 	dc_plane_state->color_space = plane_info.color_space;
5564 	dc_plane_state->plane_size = plane_info.plane_size;
5565 	dc_plane_state->rotation = plane_info.rotation;
5566 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5567 	dc_plane_state->stereo_format = plane_info.stereo_format;
5568 	dc_plane_state->tiling_info = plane_info.tiling_info;
5569 	dc_plane_state->visible = plane_info.visible;
5570 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5571 	dc_plane_state->global_alpha = plane_info.global_alpha;
5572 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5573 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 */
5575 	dc_plane_state->flip_int_enabled = true;
5576 
5577 	/*
5578 	 * Always set input transfer function, since plane state is refreshed
5579 	 * every time.
5580 	 */
5581 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5582 	if (ret)
5583 		return ret;
5584 
5585 	return 0;
5586 }
5587 
5588 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5589 					   const struct dm_connector_state *dm_state,
5590 					   struct dc_stream_state *stream)
5591 {
5592 	enum amdgpu_rmx_type rmx_type;
5593 
	struct rect src = { 0 }; /* viewport in composition space */
5595 	struct rect dst = { 0 }; /* stream addressable area */
5596 
5597 	/* no mode. nothing to be done */
5598 	if (!mode)
5599 		return;
5600 
5601 	/* Full screen scaling by default */
5602 	src.width = mode->hdisplay;
5603 	src.height = mode->vdisplay;
5604 	dst.width = stream->timing.h_addressable;
5605 	dst.height = stream->timing.v_addressable;
5606 
5607 	if (dm_state) {
5608 		rmx_type = dm_state->scaling;
5609 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
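			/*
			 * Compare aspect ratios via cross-multiplication to
			 * avoid integer division: src.w/src.h vs dst.w/dst.h.
			 */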
5610 			if (src.width * dst.height <
5611 					src.height * dst.width) {
5612 				/* height needs less upscaling/more downscaling */
5613 				dst.width = src.width *
5614 						dst.height / src.height;
5615 			} else {
5616 				/* width needs less upscaling/more downscaling */
5617 				dst.height = src.height *
5618 						dst.width / src.width;
5619 			}
5620 		} else if (rmx_type == RMX_CENTER) {
5621 			dst = src;
5622 		}
5623 
5624 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5625 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5626 
5627 		if (dm_state->underscan_enable) {
5628 			dst.x += dm_state->underscan_hborder / 2;
5629 			dst.y += dm_state->underscan_vborder / 2;
5630 			dst.width -= dm_state->underscan_hborder;
5631 			dst.height -= dm_state->underscan_vborder;
5632 		}
5633 	}
5634 
5635 	stream->src = src;
5636 	stream->dst = dst;
5637 
5638 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5639 		      dst.x, dst.y, dst.width, dst.height);
5640 
5641 }
5642 
5643 static enum dc_color_depth
5644 convert_color_depth_from_display_info(const struct drm_connector *connector,
5645 				      bool is_y420, int requested_bpc)
5646 {
5647 	uint8_t bpc;
5648 
5649 	if (is_y420) {
5650 		bpc = 8;
5651 
5652 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5653 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5654 			bpc = 16;
5655 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5656 			bpc = 12;
5657 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5658 			bpc = 10;
5659 	} else {
5660 		bpc = (uint8_t)connector->display_info.bpc;
5661 		/* Assume 8 bpc by default if no bpc is specified. */
5662 		bpc = bpc ? bpc : 8;
5663 	}
5664 
5665 	if (requested_bpc > 0) {
5666 		/*
5667 		 * Cap display bpc based on the user requested value.
5668 		 *
		 * The value for state->max_bpc may not be correctly updated
5670 		 * depending on when the connector gets added to the state
5671 		 * or if this was called outside of atomic check, so it
5672 		 * can't be used directly.
5673 		 */
5674 		bpc = min_t(u8, bpc, requested_bpc);
5675 
5676 		/* Round down to the nearest even number. */
5677 		bpc = bpc - (bpc & 1);
5678 	}
5679 
5680 	switch (bpc) {
5681 	case 0:
5682 		/*
5683 		 * Temporary Work around, DRM doesn't parse color depth for
5684 		 * EDID revision before 1.4
5685 		 * TODO: Fix edid parsing
5686 		 */
5687 		return COLOR_DEPTH_888;
5688 	case 6:
5689 		return COLOR_DEPTH_666;
5690 	case 8:
5691 		return COLOR_DEPTH_888;
5692 	case 10:
5693 		return COLOR_DEPTH_101010;
5694 	case 12:
5695 		return COLOR_DEPTH_121212;
5696 	case 14:
5697 		return COLOR_DEPTH_141414;
5698 	case 16:
5699 		return COLOR_DEPTH_161616;
5700 	default:
5701 		return COLOR_DEPTH_UNDEFINED;
5702 	}
5703 }
5704 
5705 static enum dc_aspect_ratio
5706 get_aspect_ratio(const struct drm_display_mode *mode_in)
5707 {
5708 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5709 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5710 }
5711 
5712 static enum dc_color_space
5713 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5714 {
5715 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5716 
5717 	switch (dc_crtc_timing->pixel_encoding)	{
5718 	case PIXEL_ENCODING_YCBCR422:
5719 	case PIXEL_ENCODING_YCBCR444:
5720 	case PIXEL_ENCODING_YCBCR420:
5721 	{
5722 		/*
		 * According to the HDMI spec, 27.03 MHz is the separation point
		 * between HDTV and SDTV, so we use YCbCr709 above it and
		 * YCbCr601 below it.
5726 		 */
5727 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5728 			if (dc_crtc_timing->flags.Y_ONLY)
5729 				color_space =
5730 					COLOR_SPACE_YCBCR709_LIMITED;
5731 			else
5732 				color_space = COLOR_SPACE_YCBCR709;
5733 		} else {
5734 			if (dc_crtc_timing->flags.Y_ONLY)
5735 				color_space =
5736 					COLOR_SPACE_YCBCR601_LIMITED;
5737 			else
5738 				color_space = COLOR_SPACE_YCBCR601;
5739 		}
5740 
5741 	}
5742 	break;
5743 	case PIXEL_ENCODING_RGB:
5744 		color_space = COLOR_SPACE_SRGB;
5745 		break;
5746 
5747 	default:
5748 		WARN_ON(1);
5749 		break;
5750 	}
5751 
5752 	return color_space;
5753 }
5754 
5755 static bool adjust_colour_depth_from_display_info(
5756 	struct dc_crtc_timing *timing_out,
5757 	const struct drm_display_info *info)
5758 {
5759 	enum dc_color_depth depth = timing_out->display_color_depth;
5760 	int normalized_clk;
5761 	do {
5762 		normalized_clk = timing_out->pix_clk_100hz / 10;
5763 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5764 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5765 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec by the ratio of colour depth to 24 bpp */
5767 		switch (depth) {
5768 		case COLOR_DEPTH_888:
5769 			break;
5770 		case COLOR_DEPTH_101010:
5771 			normalized_clk = (normalized_clk * 30) / 24;
5772 			break;
5773 		case COLOR_DEPTH_121212:
5774 			normalized_clk = (normalized_clk * 36) / 24;
5775 			break;
5776 		case COLOR_DEPTH_161616:
5777 			normalized_clk = (normalized_clk * 48) / 24;
5778 			break;
5779 		default:
5780 			/* The above depths are the only ones valid for HDMI. */
5781 			return false;
5782 		}
5783 		if (normalized_clk <= info->max_tmds_clock) {
5784 			timing_out->display_color_depth = depth;
5785 			return true;
5786 		}
5787 	} while (--depth > COLOR_DEPTH_666);
5788 	return false;
5789 }
5790 
5791 static void fill_stream_properties_from_drm_display_mode(
5792 	struct dc_stream_state *stream,
5793 	const struct drm_display_mode *mode_in,
5794 	const struct drm_connector *connector,
5795 	const struct drm_connector_state *connector_state,
5796 	const struct dc_stream_state *old_stream,
5797 	int requested_bpc)
5798 {
5799 	struct dc_crtc_timing *timing_out = &stream->timing;
5800 	const struct drm_display_info *info = &connector->display_info;
5801 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5802 	struct hdmi_vendor_infoframe hv_frame;
5803 	struct hdmi_avi_infoframe avi_frame;
5804 
5805 	memset(&hv_frame, 0, sizeof(hv_frame));
5806 	memset(&avi_frame, 0, sizeof(avi_frame));
5807 
5808 	timing_out->h_border_left = 0;
5809 	timing_out->h_border_right = 0;
5810 	timing_out->v_border_top = 0;
5811 	timing_out->v_border_bottom = 0;
5812 	/* TODO: un-hardcode */
5813 	if (drm_mode_is_420_only(info, mode_in)
5814 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5815 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5816 	else if (drm_mode_is_420_also(info, mode_in)
5817 			&& aconnector->force_yuv420_output)
5818 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5819 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5820 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5821 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5822 	else
5823 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5824 
5825 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5826 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5827 		connector,
5828 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5829 		requested_bpc);
5830 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5831 	timing_out->hdmi_vic = 0;
5832 
	if (old_stream) {
5834 		timing_out->vic = old_stream->timing.vic;
5835 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5836 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5837 	} else {
5838 		timing_out->vic = drm_match_cea_mode(mode_in);
5839 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5840 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5841 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5842 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5843 	}
5844 
5845 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5846 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5847 		timing_out->vic = avi_frame.video_code;
5848 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5849 		timing_out->hdmi_vic = hv_frame.vic;
5850 	}
5851 
5852 	if (is_freesync_video_mode(mode_in, aconnector)) {
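		/*
		 * FreeSync video modes are programmed from the raw mode fields
		 * rather than the crtc_* copies.
		 */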
5853 		timing_out->h_addressable = mode_in->hdisplay;
5854 		timing_out->h_total = mode_in->htotal;
5855 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5856 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5857 		timing_out->v_total = mode_in->vtotal;
5858 		timing_out->v_addressable = mode_in->vdisplay;
5859 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5860 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5861 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5862 	} else {
5863 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5864 		timing_out->h_total = mode_in->crtc_htotal;
5865 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5866 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5867 		timing_out->v_total = mode_in->crtc_vtotal;
5868 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5869 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5870 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5871 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5872 	}
5873 
5874 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5875 
5876 	stream->output_color_space = get_output_color_space(timing_out);
5877 
5878 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5879 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5880 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5881 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5882 		    drm_mode_is_420_also(info, mode_in) &&
5883 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5884 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5885 			adjust_colour_depth_from_display_info(timing_out, info);
5886 		}
5887 	}
5888 }
5889 
5890 static void fill_audio_info(struct audio_info *audio_info,
5891 			    const struct drm_connector *drm_connector,
5892 			    const struct dc_sink *dc_sink)
5893 {
5894 	int i = 0;
5895 	int cea_revision = 0;
5896 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5897 
5898 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5899 	audio_info->product_id = edid_caps->product_id;
5900 
5901 	cea_revision = drm_connector->display_info.cea_rev;
5902 
5903 	strscpy(audio_info->display_name,
5904 		edid_caps->display_name,
5905 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5906 
5907 	if (cea_revision >= 3) {
5908 		audio_info->mode_count = edid_caps->audio_mode_count;
5909 
5910 		for (i = 0; i < audio_info->mode_count; ++i) {
5911 			audio_info->modes[i].format_code =
5912 					(enum audio_format_code)
5913 					(edid_caps->audio_modes[i].format_code);
5914 			audio_info->modes[i].channel_count =
5915 					edid_caps->audio_modes[i].channel_count;
5916 			audio_info->modes[i].sample_rates.all =
5917 					edid_caps->audio_modes[i].sample_rate;
5918 			audio_info->modes[i].sample_size =
5919 					edid_caps->audio_modes[i].sample_size;
5920 		}
5921 	}
5922 
5923 	audio_info->flags.all = edid_caps->speaker_flags;
5924 
	/* TODO: We only check for progressive mode; check for interlace mode too */
5926 	if (drm_connector->latency_present[0]) {
5927 		audio_info->video_latency = drm_connector->video_latency[0];
5928 		audio_info->audio_latency = drm_connector->audio_latency[0];
5929 	}
5930 
5931 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5932 
5933 }
5934 
5935 static void
5936 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5937 				      struct drm_display_mode *dst_mode)
5938 {
5939 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5940 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5941 	dst_mode->crtc_clock = src_mode->crtc_clock;
5942 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5943 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5944 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5945 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5946 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5947 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5948 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5949 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5950 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5951 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5952 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5953 }
5954 
5955 static void
5956 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5957 					const struct drm_display_mode *native_mode,
5958 					bool scale_enabled)
5959 {
5960 	if (scale_enabled) {
5961 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5962 	} else if (native_mode->clock == drm_mode->clock &&
5963 			native_mode->htotal == drm_mode->htotal &&
5964 			native_mode->vtotal == drm_mode->vtotal) {
5965 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5966 	} else {
		/* no scaling and no amdgpu-inserted mode, nothing to patch */
5968 	}
5969 }
5970 
5971 static struct dc_sink *
5972 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5973 {
5974 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
5977 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5978 
5979 	sink = dc_sink_create(&sink_init_data);
5980 	if (!sink) {
5981 		DRM_ERROR("Failed to create sink!\n");
5982 		return NULL;
5983 	}
5984 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5985 
5986 	return sink;
5987 }
5988 
5989 static void set_multisync_trigger_params(
5990 		struct dc_stream_state *stream)
5991 {
5992 	struct dc_stream_state *master = NULL;
5993 
5994 	if (stream->triggered_crtc_reset.enabled) {
5995 		master = stream->triggered_crtc_reset.event_source;
5996 		stream->triggered_crtc_reset.event =
5997 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5998 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5999 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6000 	}
6001 }
6002 
6003 static void set_master_stream(struct dc_stream_state *stream_set[],
6004 			      int stream_count)
6005 {
6006 	int j, highest_rfr = 0, master_stream = 0;
6007 
6008 	for (j = 0;  j < stream_count; j++) {
6009 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6010 			int refresh_rate = 0;
6011 
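			/* refresh rate in Hz = pix_clk / (h_total * v_total) */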
6012 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6013 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6014 			if (refresh_rate > highest_rfr) {
6015 				highest_rfr = refresh_rate;
6016 				master_stream = j;
6017 			}
6018 		}
6019 	}
6020 	for (j = 0;  j < stream_count; j++) {
6021 		if (stream_set[j])
6022 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6023 	}
6024 }
6025 
6026 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6027 {
6028 	int i = 0;
6029 	struct dc_stream_state *stream;
6030 
6031 	if (context->stream_count < 2)
6032 		return;
6033 	for (i = 0; i < context->stream_count ; i++) {
6034 		if (!context->streams[i])
6035 			continue;
6036 		/*
6037 		 * TODO: add a function to read AMD VSDB bits and set
6038 		 * crtc_sync_master.multi_sync_enabled flag
6039 		 * For now it's set to false
6040 		 */
6041 	}
6042 
6043 	set_master_stream(context->streams, context->stream_count);
6044 
6045 	for (i = 0; i < context->stream_count ; i++) {
6046 		stream = context->streams[i];
6047 
6048 		if (!stream)
6049 			continue;
6050 
6051 		set_multisync_trigger_params(stream);
6052 	}
6053 }
6054 
6055 #if defined(CONFIG_DRM_AMD_DC_DCN)
6056 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6057 							struct dc_sink *sink, struct dc_stream_state *stream,
6058 							struct dsc_dec_dpcd_caps *dsc_caps)
6059 {
6060 	stream->timing.flags.DSC = 0;
6061 
6062 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6063 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6064 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6065 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6066 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6067 				      dsc_caps);
6068 	}
6069 }
6070 
6071 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6072 				    struct dc_sink *sink, struct dc_stream_state *stream,
6073 				    struct dsc_dec_dpcd_caps *dsc_caps,
6074 				    uint32_t max_dsc_target_bpp_limit_override)
6075 {
6076 	const struct dc_link_settings *verified_link_cap = NULL;
6077 	uint32_t link_bw_in_kbps;
6078 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6079 	struct dc *dc = sink->ctx->dc;
6080 	struct dc_dsc_bw_range bw_range = {0};
6081 	struct dc_dsc_config dsc_cfg = {0};
6082 
6083 	verified_link_cap = dc_link_get_link_cap(stream->link);
6084 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
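	/* DSC target bpp values are expressed in units of 1/16 bit per pixel. */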
6085 	edp_min_bpp_x16 = 8 * 16;
6086 	edp_max_bpp_x16 = 8 * 16;
6087 
6088 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6089 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6090 
6091 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6092 		edp_min_bpp_x16 = edp_max_bpp_x16;
6093 
6094 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6095 				dc->debug.dsc_min_slice_height_override,
6096 				edp_min_bpp_x16, edp_max_bpp_x16,
6097 				dsc_caps,
6098 				&stream->timing,
6099 				&bw_range)) {
6100 
6101 		if (bw_range.max_kbps < link_bw_in_kbps) {
6102 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6103 					dsc_caps,
6104 					dc->debug.dsc_min_slice_height_override,
6105 					max_dsc_target_bpp_limit_override,
6106 					0,
6107 					&stream->timing,
6108 					&dsc_cfg)) {
6109 				stream->timing.dsc_cfg = dsc_cfg;
6110 				stream->timing.flags.DSC = 1;
6111 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6112 			}
6113 			return;
6114 		}
6115 	}
6116 
6117 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6118 				dsc_caps,
6119 				dc->debug.dsc_min_slice_height_override,
6120 				max_dsc_target_bpp_limit_override,
6121 				link_bw_in_kbps,
6122 				&stream->timing,
6123 				&dsc_cfg)) {
6124 		stream->timing.dsc_cfg = dsc_cfg;
6125 		stream->timing.flags.DSC = 1;
6126 	}
6127 }
6128 
6129 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6130 										struct dc_sink *sink, struct dc_stream_state *stream,
6131 										struct dsc_dec_dpcd_caps *dsc_caps)
6132 {
6133 	struct drm_connector *drm_connector = &aconnector->base;
6134 	uint32_t link_bandwidth_kbps;
6135 	uint32_t max_dsc_target_bpp_limit_override = 0;
6136 	struct dc *dc = sink->ctx->dc;
6137 
6138 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6139 							dc_link_get_link_cap(aconnector->dc_link));
6140 
6141 	if (stream->link && stream->link->local_sink)
6142 		max_dsc_target_bpp_limit_override =
6143 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6144 
6145 	/* Set DSC policy according to dsc_clock_en */
6146 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6147 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6148 
6149 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6150 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6151 
6152 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6153 
6154 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6155 
6156 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6157 						dsc_caps,
6158 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6159 						max_dsc_target_bpp_limit_override,
6160 						link_bandwidth_kbps,
6161 						&stream->timing,
6162 						&stream->timing.dsc_cfg)) {
6163 			stream->timing.flags.DSC = 1;
6164 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6165 		}
6166 	}
6167 
6168 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6169 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6170 		stream->timing.flags.DSC = 1;
6171 
6172 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6173 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6174 
6175 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6176 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6177 
6178 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6179 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6180 }
6181 #endif /* CONFIG_DRM_AMD_DC_DCN */
6182 
6183 /**
6184  * DOC: FreeSync Video
6185  *
6186  * When a userspace application wants to play a video, the content follows a
6187  * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
6190  *
6191  * - TV/NTSC (23.976 FPS)
6192  * - Cinema (24 FPS)
6193  * - TV/PAL (25 FPS)
6194  * - TV/NTSC (29.97 FPS)
6195  * - TV/NTSC (30 FPS)
6196  * - Cinema HFR (48 FPS)
6197  * - TV/PAL (50 FPS)
6198  * - Commonly used (60 FPS)
6199  * - Multiples of 24 (48,72,96,120 FPS)
6200  *
 * The list of standard video formats is not huge and can be added to the
 * connector modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch happens seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid any blink during the
 * transition. For example, a video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
 * causing any display blink. The same applies to any other modesetting
 * change.
6212  */
6213 static struct drm_display_mode *
6214 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6215 			  bool use_probed_modes)
6216 {
6217 	struct drm_display_mode *m, *m_pref = NULL;
6218 	u16 current_refresh, highest_refresh;
6219 	struct list_head *list_head = use_probed_modes ?
6220 						    &aconnector->base.probed_modes :
6221 						    &aconnector->base.modes;
6222 
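	/* Return the cached FreeSync base mode if one has already been chosen. */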
6223 	if (aconnector->freesync_vid_base.clock != 0)
6224 		return &aconnector->freesync_vid_base;
6225 
6226 	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
6228 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6229 			m_pref = m;
6230 			break;
6231 		}
6232 	}
6233 
6234 	if (!m_pref) {
6235 		/* Probably an EDID with no preferred mode. Fallback to first entry */
6236 		m_pref = list_first_entry_or_null(
6237 			&aconnector->base.modes, struct drm_display_mode, head);
6238 		if (!m_pref) {
6239 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6240 			return NULL;
6241 		}
6242 	}
6243 
6244 	highest_refresh = drm_mode_vrefresh(m_pref);
6245 
6246 	/*
6247 	 * Find the mode with highest refresh rate with same resolution.
6248 	 * For some monitors, preferred mode is not the mode with highest
6249 	 * supported refresh rate.
6250 	 */
	list_for_each_entry(m, list_head, head) {
6252 		current_refresh  = drm_mode_vrefresh(m);
6253 
6254 		if (m->hdisplay == m_pref->hdisplay &&
6255 		    m->vdisplay == m_pref->vdisplay &&
6256 		    highest_refresh < current_refresh) {
6257 			highest_refresh = current_refresh;
6258 			m_pref = m;
6259 		}
6260 	}
6261 
6262 	aconnector->freesync_vid_base = *m_pref;
6263 	return m_pref;
6264 }
6265 
6266 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6267 				   struct amdgpu_dm_connector *aconnector)
6268 {
6269 	struct drm_display_mode *high_mode;
6270 	int timing_diff;
6271 
6272 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6273 	if (!high_mode || !mode)
6274 		return false;
6275 
6276 	timing_diff = high_mode->vtotal - mode->vtotal;
6277 
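	/*
	 * A FreeSync video mode must match the base mode in everything except
	 * the vertical blanking period; the vsync offsets must shift by
	 * exactly the vtotal difference.
	 */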
6278 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6279 	    high_mode->hdisplay != mode->hdisplay ||
6280 	    high_mode->vdisplay != mode->vdisplay ||
6281 	    high_mode->hsync_start != mode->hsync_start ||
6282 	    high_mode->hsync_end != mode->hsync_end ||
6283 	    high_mode->htotal != mode->htotal ||
6284 	    high_mode->hskew != mode->hskew ||
6285 	    high_mode->vscan != mode->vscan ||
6286 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6287 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6288 		return false;
6289 	else
6290 		return true;
6291 }
6292 
6293 static struct dc_stream_state *
6294 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6295 		       const struct drm_display_mode *drm_mode,
6296 		       const struct dm_connector_state *dm_state,
6297 		       const struct dc_stream_state *old_stream,
6298 		       int requested_bpc)
6299 {
6300 	struct drm_display_mode *preferred_mode = NULL;
6301 	struct drm_connector *drm_connector;
6302 	const struct drm_connector_state *con_state =
6303 		dm_state ? &dm_state->base : NULL;
6304 	struct dc_stream_state *stream = NULL;
6305 	struct drm_display_mode mode = *drm_mode;
6306 	struct drm_display_mode saved_mode;
6307 	struct drm_display_mode *freesync_mode = NULL;
6308 	bool native_mode_found = false;
6309 	bool recalculate_timing = false;
6310 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6311 	int mode_refresh;
6312 	int preferred_refresh = 0;
6313 #if defined(CONFIG_DRM_AMD_DC_DCN)
6314 	struct dsc_dec_dpcd_caps dsc_caps;
6315 #endif
6316 	struct dc_sink *sink = NULL;
6317 
6318 	memset(&saved_mode, 0, sizeof(saved_mode));
6319 
6320 	if (aconnector == NULL) {
6321 		DRM_ERROR("aconnector is NULL!\n");
6322 		return stream;
6323 	}
6324 
6325 	drm_connector = &aconnector->base;
6326 
6327 	if (!aconnector->dc_sink) {
6328 		sink = create_fake_sink(aconnector);
6329 		if (!sink)
6330 			return stream;
6331 	} else {
6332 		sink = aconnector->dc_sink;
6333 		dc_sink_retain(sink);
6334 	}
6335 
6336 	stream = dc_create_stream_for_sink(sink);
6337 
6338 	if (stream == NULL) {
6339 		DRM_ERROR("Failed to create stream for sink!\n");
6340 		goto finish;
6341 	}
6342 
6343 	stream->dm_stream_context = aconnector;
6344 
6345 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6346 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6347 
6348 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6349 		/* Search for preferred mode */
6350 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6351 			native_mode_found = true;
6352 			break;
6353 		}
6354 	}
6355 	if (!native_mode_found)
6356 		preferred_mode = list_first_entry_or_null(
6357 				&aconnector->base.modes,
6358 				struct drm_display_mode,
6359 				head);
6360 
6361 	mode_refresh = drm_mode_vrefresh(&mode);
6362 
6363 	if (preferred_mode == NULL) {
6364 		/*
		 * This may not be an error; it happens when there are no
		 * usermode calls to reset and set the mode upon hotplug. In
		 * that case, we call set mode ourselves to restore the previous
		 * mode, and the mode list may not be filled in in time.
6369 		 */
6370 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6371 	} else {
6372 		recalculate_timing = amdgpu_freesync_vid_mode &&
6373 				 is_freesync_video_mode(&mode, aconnector);
6374 		if (recalculate_timing) {
6375 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6376 			saved_mode = mode;
6377 			mode = *freesync_mode;
6378 		} else {
6379 			decide_crtc_timing_for_drm_display_mode(
6380 				&mode, preferred_mode, scale);
6381 
6382 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6383 		}
6384 	}
6385 
6386 	if (recalculate_timing)
6387 		drm_mode_set_crtcinfo(&saved_mode, 0);
6388 	else if (!dm_state)
6389 		drm_mode_set_crtcinfo(&mode, 0);
6390 
	/*
	 * If scaling is enabled and refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
6395 	if (!scale || mode_refresh != preferred_refresh)
6396 		fill_stream_properties_from_drm_display_mode(
6397 			stream, &mode, &aconnector->base, con_state, NULL,
6398 			requested_bpc);
6399 	else
6400 		fill_stream_properties_from_drm_display_mode(
6401 			stream, &mode, &aconnector->base, con_state, old_stream,
6402 			requested_bpc);
6403 
6404 #if defined(CONFIG_DRM_AMD_DC_DCN)
6405 	/* SST DSC determination policy */
6406 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6407 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6408 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6409 #endif
6410 
6411 	update_stream_scaling_settings(&mode, dm_state, stream);
6412 
6413 	fill_audio_info(
6414 		&stream->audio_info,
6415 		drm_connector,
6416 		sink);
6417 
6418 	update_stream_signal(stream, sink);
6419 
6420 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6421 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6422 
6423 	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
6428 		stream->use_vsc_sdp_for_colorimetry = false;
6429 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6430 			stream->use_vsc_sdp_for_colorimetry =
6431 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6432 		} else {
6433 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6434 				stream->use_vsc_sdp_for_colorimetry = true;
6435 		}
6436 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6437 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6438 
6439 	}
6440 finish:
6441 	dc_sink_release(sink);
6442 
6443 	return stream;
6444 }
6445 
6446 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6447 {
6448 	drm_crtc_cleanup(crtc);
6449 	kfree(crtc);
6450 }
6451 
6452 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6453 				  struct drm_crtc_state *state)
6454 {
6455 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6456 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6458 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

6465 	kfree(state);
6466 }
6467 
6468 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6469 {
6470 	struct dm_crtc_state *state;
6471 
6472 	if (crtc->state)
6473 		dm_crtc_destroy_state(crtc, crtc->state);
6474 
6475 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6476 	if (WARN_ON(!state))
6477 		return;
6478 
6479 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6480 }
6481 
6482 static struct drm_crtc_state *
6483 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6484 {
6485 	struct dm_crtc_state *state, *cur;
6486 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
6491 
6492 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6493 	if (!state)
6494 		return NULL;
6495 
6496 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6497 
6498 	if (cur->stream) {
6499 		state->stream = cur->stream;
6500 		dc_stream_retain(state->stream);
6501 	}
6502 
6503 	state->active_planes = cur->active_planes;
6504 	state->vrr_infopacket = cur->vrr_infopacket;
6505 	state->abm_level = cur->abm_level;
6506 	state->vrr_supported = cur->vrr_supported;
6507 	state->freesync_config = cur->freesync_config;
6508 	state->cm_has_degamma = cur->cm_has_degamma;
6509 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6510 	state->force_dpms_off = cur->force_dpms_off;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
6512 
6513 	return &state->base;
6514 }
6515 
6516 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6517 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6518 {
6519 	crtc_debugfs_init(crtc);
6520 
6521 	return 0;
6522 }
6523 #endif
6524 
6525 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6526 {
6527 	enum dc_irq_source irq_source;
6528 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6529 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6530 	int rc;
6531 
6532 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6533 
6534 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6535 
6536 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6537 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6538 	return rc;
6539 }
6540 
6541 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6542 {
6543 	enum dc_irq_source irq_source;
6544 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6545 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6546 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6547 #if defined(CONFIG_DRM_AMD_DC_DCN)
6548 	struct amdgpu_display_manager *dm = &adev->dm;
6549 	struct vblank_control_work *work;
6550 #endif
6551 	int rc = 0;
6552 
6553 	if (enable) {
6554 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6555 		if (amdgpu_dm_vrr_active(acrtc_state))
6556 			rc = dm_set_vupdate_irq(crtc, true);
6557 	} else {
6558 		/* vblank irq off -> vupdate irq off */
6559 		rc = dm_set_vupdate_irq(crtc, false);
6560 	}
6561 
6562 	if (rc)
6563 		return rc;
6564 
6565 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6566 
6567 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6568 		return -EBUSY;
6569 
6570 	if (amdgpu_in_reset(adev))
6571 		return 0;
6572 
6573 #if defined(CONFIG_DRM_AMD_DC_DCN)
6574 	if (dm->vblank_control_workqueue) {
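		/*
		 * Hand the remaining vblank enable/disable handling off to a
		 * worker; GFP_ATOMIC because this path may run in atomic
		 * context.
		 */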
6575 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6576 		if (!work)
6577 			return -ENOMEM;
6578 
6579 		INIT_WORK(&work->work, vblank_control_worker);
6580 		work->dm = dm;
6581 		work->acrtc = acrtc;
6582 		work->enable = enable;
6583 
6584 		if (acrtc_state->stream) {
6585 			dc_stream_retain(acrtc_state->stream);
6586 			work->stream = acrtc_state->stream;
6587 		}
6588 
6589 		queue_work(dm->vblank_control_workqueue, &work->work);
6590 	}
6591 #endif
6592 
6593 	return 0;
6594 }
6595 
6596 static int dm_enable_vblank(struct drm_crtc *crtc)
6597 {
6598 	return dm_set_vblank(crtc, true);
6599 }
6600 
6601 static void dm_disable_vblank(struct drm_crtc *crtc)
6602 {
6603 	dm_set_vblank(crtc, false);
6604 }
6605 
/* Implement only the options currently available for the driver */
6607 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6608 	.reset = dm_crtc_reset_state,
6609 	.destroy = amdgpu_dm_crtc_destroy,
6610 	.set_config = drm_atomic_helper_set_config,
6611 	.page_flip = drm_atomic_helper_page_flip,
6612 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6613 	.atomic_destroy_state = dm_crtc_destroy_state,
6614 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6615 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6616 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6617 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6618 	.enable_vblank = dm_enable_vblank,
6619 	.disable_vblank = dm_disable_vblank,
6620 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6621 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6622 	.late_register = amdgpu_dm_crtc_late_register,
6623 #endif
6624 };
6625 
6626 static enum drm_connector_status
6627 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6628 {
6629 	bool connected;
6630 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6631 
6632 	/*
6633 	 * Notes:
6634 	 * 1. This interface is NOT called in context of HPD irq.
6635 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6636 	 * makes it a bad place for *any* MST-related activity.
6637 	 */
6638 
6639 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6640 	    !aconnector->fake_enable)
6641 		connected = (aconnector->dc_sink != NULL);
6642 	else
6643 		connected = (aconnector->base.force == DRM_FORCE_ON);
6644 
6645 	update_subconnector_property(aconnector);
6646 
6647 	return (connected ? connector_status_connected :
6648 			connector_status_disconnected);
6649 }
6650 
6651 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6652 					    struct drm_connector_state *connector_state,
6653 					    struct drm_property *property,
6654 					    uint64_t val)
6655 {
6656 	struct drm_device *dev = connector->dev;
6657 	struct amdgpu_device *adev = drm_to_adev(dev);
6658 	struct dm_connector_state *dm_old_state =
6659 		to_dm_connector_state(connector->state);
6660 	struct dm_connector_state *dm_new_state =
6661 		to_dm_connector_state(connector_state);
6662 
6663 	int ret = -EINVAL;
6664 
6665 	if (property == dev->mode_config.scaling_mode_property) {
6666 		enum amdgpu_rmx_type rmx_type;
6667 
6668 		switch (val) {
6669 		case DRM_MODE_SCALE_CENTER:
6670 			rmx_type = RMX_CENTER;
6671 			break;
6672 		case DRM_MODE_SCALE_ASPECT:
6673 			rmx_type = RMX_ASPECT;
6674 			break;
6675 		case DRM_MODE_SCALE_FULLSCREEN:
6676 			rmx_type = RMX_FULL;
6677 			break;
6678 		case DRM_MODE_SCALE_NONE:
6679 		default:
6680 			rmx_type = RMX_OFF;
6681 			break;
6682 		}
6683 
6684 		if (dm_old_state->scaling == rmx_type)
6685 			return 0;
6686 
6687 		dm_new_state->scaling = rmx_type;
6688 		ret = 0;
6689 	} else if (property == adev->mode_info.underscan_hborder_property) {
6690 		dm_new_state->underscan_hborder = val;
6691 		ret = 0;
6692 	} else if (property == adev->mode_info.underscan_vborder_property) {
6693 		dm_new_state->underscan_vborder = val;
6694 		ret = 0;
6695 	} else if (property == adev->mode_info.underscan_property) {
6696 		dm_new_state->underscan_enable = val;
6697 		ret = 0;
6698 	} else if (property == adev->mode_info.abm_level_property) {
6699 		dm_new_state->abm_level = val;
6700 		ret = 0;
6701 	}
6702 
6703 	return ret;
6704 }
6705 
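/*
 * Read a DRM connector property back from the driver-private
 * dm_connector_state; the inverse of the atomic_set_property hook above.
 */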
6706 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6707 					    const struct drm_connector_state *state,
6708 					    struct drm_property *property,
6709 					    uint64_t *val)
6710 {
6711 	struct drm_device *dev = connector->dev;
6712 	struct amdgpu_device *adev = drm_to_adev(dev);
6713 	struct dm_connector_state *dm_state =
6714 		to_dm_connector_state(state);
6715 	int ret = -EINVAL;
6716 
6717 	if (property == dev->mode_config.scaling_mode_property) {
6718 		switch (dm_state->scaling) {
6719 		case RMX_CENTER:
6720 			*val = DRM_MODE_SCALE_CENTER;
6721 			break;
6722 		case RMX_ASPECT:
6723 			*val = DRM_MODE_SCALE_ASPECT;
6724 			break;
6725 		case RMX_FULL:
6726 			*val = DRM_MODE_SCALE_FULLSCREEN;
6727 			break;
6728 		case RMX_OFF:
6729 		default:
6730 			*val = DRM_MODE_SCALE_NONE;
6731 			break;
6732 		}
6733 		ret = 0;
6734 	} else if (property == adev->mode_info.underscan_hborder_property) {
6735 		*val = dm_state->underscan_hborder;
6736 		ret = 0;
6737 	} else if (property == adev->mode_info.underscan_vborder_property) {
6738 		*val = dm_state->underscan_vborder;
6739 		ret = 0;
6740 	} else if (property == adev->mode_info.underscan_property) {
6741 		*val = dm_state->underscan_enable;
6742 		ret = 0;
6743 	} else if (property == adev->mode_info.abm_level_property) {
6744 		*val = dm_state->abm_level;
6745 		ret = 0;
6746 	}
6747 
6748 	return ret;
6749 }
6750 
6751 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6752 {
6753 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6754 
6755 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6756 }
6757 
6758 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6759 {
6760 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6761 	const struct dc_link *link = aconnector->dc_link;
6762 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6763 	struct amdgpu_display_manager *dm = &adev->dm;
6764 	int i;
6765 
	/*
	 * Call only if mst_mgr was initialized earlier, since that is not
	 * done for all connector types.
	 */
6770 	if (aconnector->mst_mgr.dev)
6771 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6772 
6773 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6774 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6775 	for (i = 0; i < dm->num_of_edps; i++) {
6776 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6777 			backlight_device_unregister(dm->backlight_dev[i]);
6778 			dm->backlight_dev[i] = NULL;
6779 		}
6780 	}
6781 #endif
6782 
6783 	if (aconnector->dc_em_sink)
6784 		dc_sink_release(aconnector->dc_em_sink);
6785 	aconnector->dc_em_sink = NULL;
6786 	if (aconnector->dc_sink)
6787 		dc_sink_release(aconnector->dc_sink);
6788 	aconnector->dc_sink = NULL;
6789 
6790 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6791 	drm_connector_unregister(connector);
6792 	drm_connector_cleanup(connector);
6793 	if (aconnector->i2c) {
6794 		i2c_del_adapter(&aconnector->i2c->base);
6795 		kfree(aconnector->i2c);
6796 	}
6797 	kfree(aconnector->dm_dp_aux.aux.name);
6798 
6799 	kfree(connector);
6800 }
6801 
6802 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6803 {
6804 	struct dm_connector_state *state =
6805 		to_dm_connector_state(connector->state);
6806 
6807 	if (connector->state)
6808 		__drm_atomic_helper_connector_destroy_state(connector->state);
6809 
6810 	kfree(state);
6811 
6812 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6813 
6814 	if (state) {
6815 		state->scaling = RMX_OFF;
6816 		state->underscan_enable = false;
6817 		state->underscan_hborder = 0;
6818 		state->underscan_vborder = 0;
6819 		state->base.max_requested_bpc = 8;
6820 		state->vcpi_slots = 0;
6821 		state->pbn = 0;
6822 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6823 			state->abm_level = amdgpu_dm_abm_level;
6824 
6825 		__drm_atomic_helper_connector_reset(connector, &state->base);
6826 	}
6827 }
6828 
6829 struct drm_connector_state *
6830 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6831 {
6832 	struct dm_connector_state *state =
6833 		to_dm_connector_state(connector->state);
6834 
6835 	struct dm_connector_state *new_state =
6836 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6837 
6838 	if (!new_state)
6839 		return NULL;
6840 
6841 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6842 
6843 	new_state->freesync_capable = state->freesync_capable;
6844 	new_state->abm_level = state->abm_level;
6845 	new_state->scaling = state->scaling;
6846 	new_state->underscan_enable = state->underscan_enable;
6847 	new_state->underscan_hborder = state->underscan_hborder;
6848 	new_state->underscan_vborder = state->underscan_vborder;
6849 	new_state->vcpi_slots = state->vcpi_slots;
6850 	new_state->pbn = state->pbn;
6851 	return &new_state->base;
6852 }
6853 
6854 static int
6855 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6856 {
6857 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6858 		to_amdgpu_dm_connector(connector);
6859 	int r;
6860 
6861 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6862 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6863 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6864 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6865 		if (r)
6866 			return r;
6867 	}
6868 
6869 #if defined(CONFIG_DEBUG_FS)
6870 	connector_debugfs_init(amdgpu_dm_connector);
6871 #endif
6872 
6873 	return 0;
6874 }
6875 
6876 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6877 	.reset = amdgpu_dm_connector_funcs_reset,
6878 	.detect = amdgpu_dm_connector_detect,
6879 	.fill_modes = drm_helper_probe_single_connector_modes,
6880 	.destroy = amdgpu_dm_connector_destroy,
6881 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6882 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6883 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6884 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6885 	.late_register = amdgpu_dm_connector_late_register,
6886 	.early_unregister = amdgpu_dm_connector_unregister
6887 };
6888 
6889 static int get_modes(struct drm_connector *connector)
6890 {
6891 	return amdgpu_dm_connector_get_modes(connector);
6892 }
6893 
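/*
 * Create an emulated (SIGNAL_TYPE_VIRTUAL) DC sink from the connector's
 * override EDID blob so that a forced-on connector can be driven without a
 * physically attached display. If no EDID blob is present, the connector is
 * forced OFF instead.
 */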
6894 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6895 {
6896 	struct dc_sink_init_data init_params = {
6897 			.link = aconnector->dc_link,
6898 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6899 	};
6900 	struct edid *edid;
6901 
6902 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6904 				aconnector->base.name);
6905 
6906 		aconnector->base.force = DRM_FORCE_OFF;
6907 		aconnector->base.override_edid = false;
6908 		return;
6909 	}
6910 
6911 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6912 
6913 	aconnector->edid = edid;
6914 
6915 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6916 		aconnector->dc_link,
6917 		(uint8_t *)edid,
6918 		(edid->extensions + 1) * EDID_LENGTH,
6919 		&init_params);
6920 
6921 	if (aconnector->base.force == DRM_FORCE_ON) {
6922 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6923 		aconnector->dc_link->local_sink :
6924 		aconnector->dc_em_sink;
6925 		dc_sink_retain(aconnector->dc_sink);
6926 	}
6927 }
6928 
6929 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6930 {
6931 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6932 
	/*
	 * In case of a headless boot with force-on for a DP-managed connector,
	 * these settings have to be != 0 to get an initial modeset.
	 */
6937 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6938 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6939 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6940 	}
6941 
6943 	aconnector->base.override_edid = true;
6944 	create_eml_sink(aconnector);
6945 }
6946 
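/*
 * Build a dc_stream_state for this sink and have DC validate it. On
 * validation failure the requested bpc is lowered in steps of 2 (down to a
 * minimum of 6) and, if the encoder still rejects the stream, one more
 * attempt is made with YCbCr 4:2:0 output forced.
 */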
6947 static struct dc_stream_state *
6948 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6949 				const struct drm_display_mode *drm_mode,
6950 				const struct dm_connector_state *dm_state,
6951 				const struct dc_stream_state *old_stream)
6952 {
6953 	struct drm_connector *connector = &aconnector->base;
6954 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6955 	struct dc_stream_state *stream;
6956 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6957 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6958 	enum dc_status dc_result = DC_OK;
6959 
6960 	do {
6961 		stream = create_stream_for_sink(aconnector, drm_mode,
6962 						dm_state, old_stream,
6963 						requested_bpc);
6964 		if (stream == NULL) {
6965 			DRM_ERROR("Failed to create stream for sink!\n");
6966 			break;
6967 		}
6968 
6969 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6970 
6971 		if (dc_result != DC_OK) {
6972 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6973 				      drm_mode->hdisplay,
6974 				      drm_mode->vdisplay,
6975 				      drm_mode->clock,
6976 				      dc_result,
6977 				      dc_status_to_str(dc_result));
6978 
6979 			dc_stream_release(stream);
6980 			stream = NULL;
6981 			requested_bpc -= 2; /* lower bpc to retry validation */
6982 		}
6983 
6984 	} while (stream == NULL && requested_bpc >= 6);
6985 
6986 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6987 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6988 
6989 		aconnector->force_yuv420_output = true;
6990 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6991 						dm_state, old_stream);
6992 		aconnector->force_yuv420_output = false;
6993 	}
6994 
6995 	return stream;
6996 }
6997 
6998 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6999 				   struct drm_display_mode *mode)
7000 {
7001 	int result = MODE_ERROR;
7002 	struct dc_sink *dc_sink;
7003 	/* TODO: Unhardcode stream count */
7004 	struct dc_stream_state *stream;
7005 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7006 
7007 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7008 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7009 		return result;
7010 
	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID management.
	 */
7015 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7016 		!aconnector->dc_em_sink)
7017 		handle_edid_mgmt(aconnector);
7018 
7019 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7020 
7021 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7022 				aconnector->base.force != DRM_FORCE_ON) {
7023 		DRM_ERROR("dc_sink is NULL!\n");
7024 		goto fail;
7025 	}
7026 
7027 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7028 	if (stream) {
7029 		dc_stream_release(stream);
7030 		result = MODE_OK;
7031 	}
7032 
7033 fail:
	/* TODO: error handling */
7035 	return result;
7036 }
7037 
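/*
 * Pack the connector's HDR output metadata into an HDMI Dynamic Range and
 * Mastering (DRM) infoframe and convert it to the DC info packet layout:
 * an infoframe header for HDMI connectors, or an SDP header for DP/eDP.
 */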
7038 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7039 				struct dc_info_packet *out)
7040 {
7041 	struct hdmi_drm_infoframe frame;
7042 	unsigned char buf[30]; /* 26 + 4 */
7043 	ssize_t len;
7044 	int ret, i;
7045 
7046 	memset(out, 0, sizeof(*out));
7047 
7048 	if (!state->hdr_output_metadata)
7049 		return 0;
7050 
7051 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7052 	if (ret)
7053 		return ret;
7054 
7055 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7056 	if (len < 0)
7057 		return (int)len;
7058 
7059 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7060 	if (len != 30)
7061 		return -EINVAL;
7062 
7063 	/* Prepare the infopacket for DC. */
7064 	switch (state->connector->connector_type) {
7065 	case DRM_MODE_CONNECTOR_HDMIA:
7066 		out->hb0 = 0x87; /* type */
7067 		out->hb1 = 0x01; /* version */
7068 		out->hb2 = 0x1A; /* length */
7069 		out->sb[0] = buf[3]; /* checksum */
7070 		i = 1;
7071 		break;
7072 
7073 	case DRM_MODE_CONNECTOR_DisplayPort:
7074 	case DRM_MODE_CONNECTOR_eDP:
7075 		out->hb0 = 0x00; /* sdp id, zero */
7076 		out->hb1 = 0x87; /* type */
7077 		out->hb2 = 0x1D; /* payload len - 1 */
7078 		out->hb3 = (0x13 << 2); /* sdp version */
7079 		out->sb[0] = 0x01; /* version */
7080 		out->sb[1] = 0x1A; /* length */
7081 		i = 2;
7082 		break;
7083 
7084 	default:
7085 		return -EINVAL;
7086 	}
7087 
7088 	memcpy(&out->sb[i], &buf[4], 26);
7089 	out->valid = true;
7090 
7091 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7092 		       sizeof(out->sb), false);
7093 
7094 	return 0;
7095 }
7096 
7097 static int
7098 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7099 				 struct drm_atomic_state *state)
7100 {
7101 	struct drm_connector_state *new_con_state =
7102 		drm_atomic_get_new_connector_state(state, conn);
7103 	struct drm_connector_state *old_con_state =
7104 		drm_atomic_get_old_connector_state(state, conn);
7105 	struct drm_crtc *crtc = new_con_state->crtc;
7106 	struct drm_crtc_state *new_crtc_state;
7107 	int ret;
7108 
7109 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7110 
7111 	if (!crtc)
7112 		return 0;
7113 
7114 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7115 		struct dc_info_packet hdr_infopacket;
7116 
7117 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7118 		if (ret)
7119 			return ret;
7120 
7121 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7122 		if (IS_ERR(new_crtc_state))
7123 			return PTR_ERR(new_crtc_state);
7124 
7125 		/*
7126 		 * DC considers the stream backends changed if the
7127 		 * static metadata changes. Forcing the modeset also
7128 		 * gives a simple way for userspace to switch from
7129 		 * 8bpc to 10bpc when setting the metadata to enter
7130 		 * or exit HDR.
7131 		 *
7132 		 * Changing the static metadata after it's been
7133 		 * set is permissible, however. So only force a
7134 		 * modeset if we're entering or exiting HDR.
7135 		 */
7136 		new_crtc_state->mode_changed =
7137 			!old_con_state->hdr_output_metadata ||
7138 			!new_con_state->hdr_output_metadata;
7139 	}
7140 
7141 	return 0;
7142 }
7143 
7144 static const struct drm_connector_helper_funcs
7145 amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, larger display is hotplugged while in fbcon mode, its
	 * higher-resolution modes are filtered out by drm_mode_validate_size()
	 * and are missing once the user starts lightdm. So the mode list needs
	 * to be rebuilt in the get_modes callback rather than just returning
	 * the mode count.
	 */
7152 	.get_modes = get_modes,
7153 	.mode_valid = amdgpu_dm_connector_mode_valid,
7154 	.atomic_check = amdgpu_dm_connector_atomic_check,
7155 };
7156 
7157 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7158 {
7159 }
7160 
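/*
 * Count the non-cursor planes that will be active on this CRTC after the
 * commit. A plane counts as active when it has a framebuffer bound, or when
 * it is untouched by this atomic state and was therefore already enabled.
 */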
7161 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7162 {
7163 	struct drm_atomic_state *state = new_crtc_state->state;
7164 	struct drm_plane *plane;
7165 	int num_active = 0;
7166 
7167 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7168 		struct drm_plane_state *new_plane_state;
7169 
7170 		/* Cursor planes are "fake". */
7171 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7172 			continue;
7173 
7174 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7175 
7176 		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
7182 			num_active += 1;
7183 			continue;
7184 		}
7185 
7186 		/* We need a framebuffer to be considered enabled. */
7187 		num_active += (new_plane_state->fb != NULL);
7188 	}
7189 
7190 	return num_active;
7191 }
7192 
7193 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7194 					 struct drm_crtc_state *new_crtc_state)
7195 {
7196 	struct dm_crtc_state *dm_new_crtc_state =
7197 		to_dm_crtc_state(new_crtc_state);
7198 
7199 	dm_new_crtc_state->active_planes = 0;
7200 
7201 	if (!dm_new_crtc_state->stream)
7202 		return;
7203 
7204 	dm_new_crtc_state->active_planes =
7205 		count_crtc_active_planes(new_crtc_state);
7206 }
7207 
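/*
 * CRTC atomic check: refresh the active plane count, require the primary
 * plane to be enabled whenever the CRTC is, and let DC validate the stream
 * attached to this CRTC, if any.
 */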
7208 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7209 				       struct drm_atomic_state *state)
7210 {
7211 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7212 									  crtc);
7213 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7214 	struct dc *dc = adev->dm.dc;
7215 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7216 	int ret = -EINVAL;
7217 
7218 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7219 
7220 	dm_update_crtc_active_planes(crtc, crtc_state);
7221 
7222 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7223 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7224 		return ret;
7225 	}
7226 
7227 	/*
7228 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7229 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7230 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7231 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7232 	 */
7233 	if (crtc_state->enable &&
7234 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7235 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7236 		return -EINVAL;
7237 	}
7238 
7239 	/* In some use cases, like reset, no stream is attached */
7240 	if (!dm_crtc_state->stream)
7241 		return 0;
7242 
7243 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7244 		return 0;
7245 
7246 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7247 	return ret;
7248 }
7249 
7250 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7251 				      const struct drm_display_mode *mode,
7252 				      struct drm_display_mode *adjusted_mode)
7253 {
7254 	return true;
7255 }
7256 
7257 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7258 	.disable = dm_crtc_helper_disable,
7259 	.atomic_check = dm_crtc_helper_atomic_check,
7260 	.mode_fixup = dm_crtc_helper_mode_fixup,
7261 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7262 };
7263 
7264 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7265 {
7266 
7267 }
7268 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7289 
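/*
 * For MST connectors, compute the payload bandwidth number (PBN) for the
 * adjusted mode and atomically reserve the corresponding VCPI time slots on
 * the topology manager.
 */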
7290 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7291 					  struct drm_crtc_state *crtc_state,
7292 					  struct drm_connector_state *conn_state)
7293 {
7294 	struct drm_atomic_state *state = crtc_state->state;
7295 	struct drm_connector *connector = conn_state->connector;
7296 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7297 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7298 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7299 	struct drm_dp_mst_topology_mgr *mst_mgr;
7300 	struct drm_dp_mst_port *mst_port;
7301 	enum dc_color_depth color_depth;
7302 	int clock, bpp = 0;
7303 	bool is_y420 = false;
7304 
7305 	if (!aconnector->port || !aconnector->dc_sink)
7306 		return 0;
7307 
7308 	mst_port = aconnector->port;
7309 	mst_mgr = &aconnector->mst_port->mst_mgr;
7310 
7311 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7312 		return 0;
7313 
7314 	if (!state->duplicated) {
7315 		int max_bpc = conn_state->max_requested_bpc;
7316 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7317 				aconnector->force_yuv420_output;
7318 		color_depth = convert_color_depth_from_display_info(connector,
7319 								    is_y420,
7320 								    max_bpc);
7321 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7322 		clock = adjusted_mode->clock;
7323 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7324 	}
7325 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7326 									   mst_mgr,
7327 									   mst_port,
7328 									   dm_new_connector_state->pbn,
7329 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7330 	if (dm_new_connector_state->vcpi_slots < 0) {
7331 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7332 		return dm_new_connector_state->vcpi_slots;
7333 	}
7334 	return 0;
7335 }
7336 
7337 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7338 	.disable = dm_encoder_helper_disable,
7339 	.atomic_check = dm_encoder_helper_atomic_check
7340 };
7341 
7342 #if defined(CONFIG_DRM_AMD_DC_DCN)
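/*
 * Propagate the PBN values computed by the MST DSC fairness algorithm into
 * each connector's state and enable or disable DSC on the corresponding MST
 * port, reserving the matching number of VCPI slots.
 */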
7343 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7344 					    struct dc_state *dc_state,
7345 					    struct dsc_mst_fairness_vars *vars)
7346 {
7347 	struct dc_stream_state *stream = NULL;
7348 	struct drm_connector *connector;
7349 	struct drm_connector_state *new_con_state;
7350 	struct amdgpu_dm_connector *aconnector;
7351 	struct dm_connector_state *dm_conn_state;
7352 	int i, j;
7353 	int vcpi, pbn_div, pbn, slot_num = 0;
7354 
7355 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7356 
7357 		aconnector = to_amdgpu_dm_connector(connector);
7358 
7359 		if (!aconnector->port)
7360 			continue;
7361 
7362 		if (!new_con_state || !new_con_state->crtc)
7363 			continue;
7364 
7365 		dm_conn_state = to_dm_connector_state(new_con_state);
7366 
7367 		for (j = 0; j < dc_state->stream_count; j++) {
7368 			stream = dc_state->streams[j];
7369 			if (!stream)
7370 				continue;
7371 
7372 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7373 				break;
7374 
7375 			stream = NULL;
7376 		}
7377 
7378 		if (!stream)
7379 			continue;
7380 
7381 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
7383 		for (j = 0; j < dc_state->stream_count; j++) {
7384 			if (vars[j].aconnector == aconnector) {
7385 				pbn = vars[j].pbn;
7386 				break;
7387 			}
7388 		}
7389 
7390 		if (j == dc_state->stream_count)
7391 			continue;
7392 
7393 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7394 
7395 		if (stream->timing.flags.DSC != 1) {
7396 			dm_conn_state->pbn = pbn;
7397 			dm_conn_state->vcpi_slots = slot_num;
7398 
7399 			drm_dp_mst_atomic_enable_dsc(state,
7400 						     aconnector->port,
7401 						     dm_conn_state->pbn,
7402 						     0,
7403 						     false);
7404 			continue;
7405 		}
7406 
7407 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7408 						    aconnector->port,
7409 						    pbn, pbn_div,
7410 						    true);
7411 		if (vcpi < 0)
7412 			return vcpi;
7413 
7414 		dm_conn_state->pbn = pbn;
7415 		dm_conn_state->vcpi_slots = vcpi;
7416 	}
7417 	return 0;
7418 }
7419 #endif
7420 
7421 static void dm_drm_plane_reset(struct drm_plane *plane)
7422 {
7423 	struct dm_plane_state *amdgpu_state = NULL;
7424 
7425 	if (plane->state)
7426 		plane->funcs->atomic_destroy_state(plane, plane->state);
7427 
7428 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7429 	WARN_ON(amdgpu_state == NULL);
7430 
7431 	if (amdgpu_state)
7432 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7433 }
7434 
7435 static struct drm_plane_state *
7436 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7437 {
7438 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7439 
7440 	old_dm_plane_state = to_dm_plane_state(plane->state);
7441 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7442 	if (!dm_plane_state)
7443 		return NULL;
7444 
7445 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7446 
7447 	if (old_dm_plane_state->dc_state) {
7448 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7449 		dc_plane_state_retain(dm_plane_state->dc_state);
7450 	}
7451 
7452 	return &dm_plane_state->base;
7453 }
7454 
7455 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7456 				struct drm_plane_state *state)
7457 {
7458 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7459 
7460 	if (dm_plane_state->dc_state)
7461 		dc_plane_state_release(dm_plane_state->dc_state);
7462 
7463 	drm_atomic_helper_plane_destroy_state(plane, state);
7464 }
7465 
7466 static const struct drm_plane_funcs dm_plane_funcs = {
7467 	.update_plane	= drm_atomic_helper_update_plane,
7468 	.disable_plane	= drm_atomic_helper_disable_plane,
7469 	.destroy	= drm_primary_helper_destroy,
7470 	.reset = dm_drm_plane_reset,
7471 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7472 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7473 	.format_mod_supported = dm_plane_format_mod_supported,
7474 };
7475 
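/*
 * prepare_fb hook: reserve and pin the framebuffer BO in a displayable
 * domain (VRAM for cursors), map it into GART, record the resulting GPU
 * address in the amdgpu framebuffer, and fill the DC plane buffer
 * attributes for newly created plane states.
 */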
7476 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7477 				      struct drm_plane_state *new_state)
7478 {
7479 	struct amdgpu_framebuffer *afb;
7480 	struct drm_gem_object *obj;
7481 	struct amdgpu_device *adev;
7482 	struct amdgpu_bo *rbo;
7483 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7484 	struct list_head list;
7485 	struct ttm_validate_buffer tv;
7486 	struct ww_acquire_ctx ticket;
7487 	uint32_t domain;
7488 	int r;
7489 
7490 	if (!new_state->fb) {
7491 		DRM_DEBUG_KMS("No FB bound\n");
7492 		return 0;
7493 	}
7494 
7495 	afb = to_amdgpu_framebuffer(new_state->fb);
7496 	obj = new_state->fb->obj[0];
7497 	rbo = gem_to_amdgpu_bo(obj);
7498 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7499 	INIT_LIST_HEAD(&list);
7500 
7501 	tv.bo = &rbo->tbo;
7502 	tv.num_shared = 1;
7503 	list_add(&tv.head, &list);
7504 
7505 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7506 	if (r) {
7507 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7508 		return r;
7509 	}
7510 
7511 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7512 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7513 	else
7514 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7515 
7516 	r = amdgpu_bo_pin(rbo, domain);
7517 	if (unlikely(r != 0)) {
7518 		if (r != -ERESTARTSYS)
7519 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7520 		ttm_eu_backoff_reservation(&ticket, &list);
7521 		return r;
7522 	}
7523 
7524 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7525 	if (unlikely(r != 0)) {
7526 		amdgpu_bo_unpin(rbo);
7527 		ttm_eu_backoff_reservation(&ticket, &list);
7528 		DRM_ERROR("%p bind failed\n", rbo);
7529 		return r;
7530 	}
7531 
7532 	ttm_eu_backoff_reservation(&ticket, &list);
7533 
7534 	afb->address = amdgpu_bo_gpu_offset(rbo);
7535 
7536 	amdgpu_bo_ref(rbo);
7537 
	/*
7539 	 * We don't do surface updates on planes that have been newly created,
7540 	 * but we also don't have the afb->address during atomic check.
7541 	 *
7542 	 * Fill in buffer attributes depending on the address here, but only on
7543 	 * newly created planes since they're not being used by DC yet and this
7544 	 * won't modify global state.
7545 	 */
7546 	dm_plane_state_old = to_dm_plane_state(plane->state);
7547 	dm_plane_state_new = to_dm_plane_state(new_state);
7548 
7549 	if (dm_plane_state_new->dc_state &&
7550 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7551 		struct dc_plane_state *plane_state =
7552 			dm_plane_state_new->dc_state;
7553 		bool force_disable_dcc = !plane_state->dcc.enable;
7554 
7555 		fill_plane_buffer_attributes(
7556 			adev, afb, plane_state->format, plane_state->rotation,
7557 			afb->tiling_flags,
7558 			&plane_state->tiling_info, &plane_state->plane_size,
7559 			&plane_state->dcc, &plane_state->address,
7560 			afb->tmz_surface, force_disable_dcc);
7561 	}
7562 
7563 	return 0;
7564 }
7565 
7566 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7567 				       struct drm_plane_state *old_state)
7568 {
7569 	struct amdgpu_bo *rbo;
7570 	int r;
7571 
7572 	if (!old_state->fb)
7573 		return;
7574 
7575 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7576 	r = amdgpu_bo_reserve(rbo, false);
7577 	if (unlikely(r)) {
7578 		DRM_ERROR("failed to reserve rbo before unpin\n");
7579 		return;
7580 	}
7581 
7582 	amdgpu_bo_unpin(rbo);
7583 	amdgpu_bo_unreserve(rbo);
7584 	amdgpu_bo_unref(&rbo);
7585 }
7586 
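/*
 * Validate the plane state against its CRTC: clip the viewport to the CRTC
 * mode, reject viewports that are too small or entirely off-screen, and
 * translate DC's scaling caps into DRM's 16.16 fixed-point scale limits for
 * drm_atomic_helper_check_plane_state().
 */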
7587 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7588 				       struct drm_crtc_state *new_crtc_state)
7589 {
7590 	struct drm_framebuffer *fb = state->fb;
7591 	int min_downscale, max_upscale;
7592 	int min_scale = 0;
7593 	int max_scale = INT_MAX;
7594 
7595 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7596 	if (fb && state->crtc) {
7597 		/* Validate viewport to cover the case when only the position changes */
7598 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7599 			int viewport_width = state->crtc_w;
7600 			int viewport_height = state->crtc_h;
7601 
7602 			if (state->crtc_x < 0)
7603 				viewport_width += state->crtc_x;
7604 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7605 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7606 
7607 			if (state->crtc_y < 0)
7608 				viewport_height += state->crtc_y;
7609 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7610 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7611 
7612 			if (viewport_width < 0 || viewport_height < 0) {
7613 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7614 				return -EINVAL;
7615 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7616 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7617 				return -EINVAL;
7618 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7619 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7620 				return -EINVAL;
7621 			}
7622 
7623 		}
7624 
7625 		/* Get min/max allowed scaling factors from plane caps. */
7626 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7627 					     &min_downscale, &max_upscale);
7628 		/*
7629 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7630 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7631 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7632 		 */
7633 		min_scale = (1000 << 16) / max_upscale;
7634 		max_scale = (1000 << 16) / min_downscale;
7635 	}
7636 
7637 	return drm_atomic_helper_check_plane_state(
7638 		state, new_crtc_state, min_scale, max_scale, true, true);
7639 }
7640 
7641 static int dm_plane_atomic_check(struct drm_plane *plane,
7642 				 struct drm_atomic_state *state)
7643 {
7644 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7645 										 plane);
7646 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7647 	struct dc *dc = adev->dm.dc;
7648 	struct dm_plane_state *dm_plane_state;
7649 	struct dc_scaling_info scaling_info;
7650 	struct drm_crtc_state *new_crtc_state;
7651 	int ret;
7652 
7653 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7654 
7655 	dm_plane_state = to_dm_plane_state(new_plane_state);
7656 
7657 	if (!dm_plane_state->dc_state)
7658 		return 0;
7659 
7660 	new_crtc_state =
7661 		drm_atomic_get_new_crtc_state(state,
7662 					      new_plane_state->crtc);
7663 	if (!new_crtc_state)
7664 		return -EINVAL;
7665 
7666 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7667 	if (ret)
7668 		return ret;
7669 
7670 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7671 	if (ret)
7672 		return ret;
7673 
7674 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7675 		return 0;
7676 
7677 	return -EINVAL;
7678 }
7679 
7680 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7681 				       struct drm_atomic_state *state)
7682 {
7683 	/* Only support async updates on cursor planes. */
7684 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7685 		return -EINVAL;
7686 
7687 	return 0;
7688 }
7689 
7690 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7691 					 struct drm_atomic_state *state)
7692 {
7693 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7694 									   plane);
7695 	struct drm_plane_state *old_state =
7696 		drm_atomic_get_old_plane_state(state, plane);
7697 
7698 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7699 
7700 	swap(plane->state->fb, new_state->fb);
7701 
7702 	plane->state->src_x = new_state->src_x;
7703 	plane->state->src_y = new_state->src_y;
7704 	plane->state->src_w = new_state->src_w;
7705 	plane->state->src_h = new_state->src_h;
7706 	plane->state->crtc_x = new_state->crtc_x;
7707 	plane->state->crtc_y = new_state->crtc_y;
7708 	plane->state->crtc_w = new_state->crtc_w;
7709 	plane->state->crtc_h = new_state->crtc_h;
7710 
7711 	handle_cursor_update(plane, old_state);
7712 }
7713 
7714 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7715 	.prepare_fb = dm_plane_helper_prepare_fb,
7716 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7717 	.atomic_check = dm_plane_atomic_check,
7718 	.atomic_async_check = dm_plane_atomic_async_check,
7719 	.atomic_async_update = dm_plane_atomic_async_update
7720 };
7721 
/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * DRM check will succeed, and let DC implement the proper check.
 */
7728 static const uint32_t rgb_formats[] = {
7729 	DRM_FORMAT_XRGB8888,
7730 	DRM_FORMAT_ARGB8888,
7731 	DRM_FORMAT_RGBA8888,
7732 	DRM_FORMAT_XRGB2101010,
7733 	DRM_FORMAT_XBGR2101010,
7734 	DRM_FORMAT_ARGB2101010,
7735 	DRM_FORMAT_ABGR2101010,
7736 	DRM_FORMAT_XRGB16161616,
7737 	DRM_FORMAT_XBGR16161616,
7738 	DRM_FORMAT_ARGB16161616,
7739 	DRM_FORMAT_ABGR16161616,
7740 	DRM_FORMAT_XBGR8888,
7741 	DRM_FORMAT_ABGR8888,
7742 	DRM_FORMAT_RGB565,
7743 };
7744 
7745 static const uint32_t overlay_formats[] = {
7746 	DRM_FORMAT_XRGB8888,
7747 	DRM_FORMAT_ARGB8888,
7748 	DRM_FORMAT_RGBA8888,
7749 	DRM_FORMAT_XBGR8888,
7750 	DRM_FORMAT_ABGR8888,
7751 	DRM_FORMAT_RGB565
7752 };
7753 
7754 static const u32 cursor_formats[] = {
7755 	DRM_FORMAT_ARGB8888
7756 };
7757 
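/*
 * Fill the formats array with the pixel formats supported for this plane
 * type; primary planes additionally get NV12/P010/FP16 entries when the DC
 * plane caps advertise them. Returns the number of formats written.
 */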
7758 static int get_plane_formats(const struct drm_plane *plane,
7759 			     const struct dc_plane_cap *plane_cap,
7760 			     uint32_t *formats, int max_formats)
7761 {
7762 	int i, num_formats = 0;
7763 
7764 	/*
7765 	 * TODO: Query support for each group of formats directly from
7766 	 * DC plane caps. This will require adding more formats to the
7767 	 * caps list.
7768 	 */
7769 
7770 	switch (plane->type) {
7771 	case DRM_PLANE_TYPE_PRIMARY:
7772 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7773 			if (num_formats >= max_formats)
7774 				break;
7775 
7776 			formats[num_formats++] = rgb_formats[i];
7777 		}
7778 
7779 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7780 			formats[num_formats++] = DRM_FORMAT_NV12;
7781 		if (plane_cap && plane_cap->pixel_format_support.p010)
7782 			formats[num_formats++] = DRM_FORMAT_P010;
7783 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7784 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7785 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7786 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7787 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7788 		}
7789 		break;
7790 
7791 	case DRM_PLANE_TYPE_OVERLAY:
7792 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7793 			if (num_formats >= max_formats)
7794 				break;
7795 
7796 			formats[num_formats++] = overlay_formats[i];
7797 		}
7798 		break;
7799 
7800 	case DRM_PLANE_TYPE_CURSOR:
7801 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7802 			if (num_formats >= max_formats)
7803 				break;
7804 
7805 			formats[num_formats++] = cursor_formats[i];
7806 		}
7807 		break;
7808 	}
7809 
7810 	return num_formats;
7811 }
7812 
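/*
 * Initialize a DRM plane for DM: query supported formats and modifiers,
 * register the plane with the DRM core, and attach alpha/blend, color
 * encoding and rotation properties according to the DC plane capabilities.
 */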
7813 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7814 				struct drm_plane *plane,
7815 				unsigned long possible_crtcs,
7816 				const struct dc_plane_cap *plane_cap)
7817 {
7818 	uint32_t formats[32];
7819 	int num_formats;
7820 	int res = -EPERM;
7821 	unsigned int supported_rotations;
7822 	uint64_t *modifiers = NULL;
7823 
7824 	num_formats = get_plane_formats(plane, plane_cap, formats,
7825 					ARRAY_SIZE(formats));
7826 
7827 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7828 	if (res)
7829 		return res;
7830 
7831 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7832 				       &dm_plane_funcs, formats, num_formats,
7833 				       modifiers, plane->type, NULL);
7834 	kfree(modifiers);
7835 	if (res)
7836 		return res;
7837 
7838 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7839 	    plane_cap && plane_cap->per_pixel_alpha) {
7840 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7841 					  BIT(DRM_MODE_BLEND_PREMULTI);
7842 
7843 		drm_plane_create_alpha_property(plane);
7844 		drm_plane_create_blend_mode_property(plane, blend_caps);
7845 	}
7846 
7847 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7848 	    plane_cap &&
7849 	    (plane_cap->pixel_format_support.nv12 ||
7850 	     plane_cap->pixel_format_support.p010)) {
7851 		/* This only affects YUV formats. */
7852 		drm_plane_create_color_properties(
7853 			plane,
7854 			BIT(DRM_COLOR_YCBCR_BT601) |
7855 			BIT(DRM_COLOR_YCBCR_BT709) |
7856 			BIT(DRM_COLOR_YCBCR_BT2020),
7857 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7858 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7859 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7860 	}
7861 
7862 	supported_rotations =
7863 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7864 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7865 
7866 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7867 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7868 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7869 						   supported_rotations);
7870 
7871 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7872 
7873 	/* Create (reset) the plane state */
7874 	if (plane->funcs->reset)
7875 		plane->funcs->reset(plane);
7876 
7877 	return 0;
7878 }
7879 
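/*
 * Initialize a DRM CRTC for DM together with its dedicated cursor plane,
 * hook up the CRTC helper funcs and color management, and record the CRTC
 * in adev->mode_info. The maximum cursor size is taken from the DC caps.
 */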
7880 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7881 			       struct drm_plane *plane,
7882 			       uint32_t crtc_index)
7883 {
7884 	struct amdgpu_crtc *acrtc = NULL;
7885 	struct drm_plane *cursor_plane;
7886 
7887 	int res = -ENOMEM;
7888 
7889 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7890 	if (!cursor_plane)
7891 		goto fail;
7892 
7893 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7894 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7895 
7896 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7897 	if (!acrtc)
7898 		goto fail;
7899 
7900 	res = drm_crtc_init_with_planes(
7901 			dm->ddev,
7902 			&acrtc->base,
7903 			plane,
7904 			cursor_plane,
7905 			&amdgpu_dm_crtc_funcs, NULL);
7906 
7907 	if (res)
7908 		goto fail;
7909 
7910 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7911 
	/* Create (reset) the CRTC state */
7913 	if (acrtc->base.funcs->reset)
7914 		acrtc->base.funcs->reset(&acrtc->base);
7915 
7916 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7917 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7918 
7919 	acrtc->crtc_id = crtc_index;
7920 	acrtc->base.enabled = false;
7921 	acrtc->otg_inst = -1;
7922 
7923 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7924 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7925 				   true, MAX_COLOR_LUT_ENTRIES);
7926 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7927 
7928 	return 0;
7929 
7930 fail:
7931 	kfree(acrtc);
7932 	kfree(cursor_plane);
7933 	return res;
7934 }
7935 
7936 
7937 static int to_drm_connector_type(enum signal_type st)
7938 {
7939 	switch (st) {
7940 	case SIGNAL_TYPE_HDMI_TYPE_A:
7941 		return DRM_MODE_CONNECTOR_HDMIA;
7942 	case SIGNAL_TYPE_EDP:
7943 		return DRM_MODE_CONNECTOR_eDP;
7944 	case SIGNAL_TYPE_LVDS:
7945 		return DRM_MODE_CONNECTOR_LVDS;
7946 	case SIGNAL_TYPE_RGB:
7947 		return DRM_MODE_CONNECTOR_VGA;
7948 	case SIGNAL_TYPE_DISPLAY_PORT:
7949 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7950 		return DRM_MODE_CONNECTOR_DisplayPort;
7951 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7952 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7953 		return DRM_MODE_CONNECTOR_DVID;
7954 	case SIGNAL_TYPE_VIRTUAL:
7955 		return DRM_MODE_CONNECTOR_VIRTUAL;
7956 
7957 	default:
7958 		return DRM_MODE_CONNECTOR_Unknown;
7959 	}
7960 }
7961 
7962 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7963 {
7964 	struct drm_encoder *encoder;
7965 
7966 	/* There is only one encoder per connector */
7967 	drm_connector_for_each_possible_encoder(connector, encoder)
7968 		return encoder;
7969 
7970 	return NULL;
7971 }
7972 
7973 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7974 {
7975 	struct drm_encoder *encoder;
7976 	struct amdgpu_encoder *amdgpu_encoder;
7977 
7978 	encoder = amdgpu_dm_connector_to_encoder(connector);
7979 
7980 	if (encoder == NULL)
7981 		return;
7982 
7983 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7984 
7985 	amdgpu_encoder->native_mode.clock = 0;
7986 
7987 	if (!list_empty(&connector->probed_modes)) {
7988 		struct drm_display_mode *preferred_mode = NULL;
7989 
7990 		list_for_each_entry(preferred_mode,
7991 				    &connector->probed_modes,
7992 				    head) {
7993 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7994 				amdgpu_encoder->native_mode = *preferred_mode;
7995 
7996 			break;
7997 		}
7998 
7999 	}
8000 }
8001 
8002 static struct drm_display_mode *
8003 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8004 			     char *name,
8005 			     int hdisplay, int vdisplay)
8006 {
8007 	struct drm_device *dev = encoder->dev;
8008 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8009 	struct drm_display_mode *mode = NULL;
8010 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8011 
8012 	mode = drm_mode_duplicate(dev, native_mode);
8013 
8014 	if (mode == NULL)
8015 		return NULL;
8016 
8017 	mode->hdisplay = hdisplay;
8018 	mode->vdisplay = vdisplay;
8019 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8020 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8021 
8022 	return mode;
8023 
8024 }
8025 
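/*
 * Add a set of common modes (640x480 through 1920x1200) that fit within the
 * panel's native mode and are not already in the probed mode list, so
 * userspace gets sensible scaled-mode choices.
 */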
8026 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8027 						 struct drm_connector *connector)
8028 {
8029 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8030 	struct drm_display_mode *mode = NULL;
8031 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8032 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8033 				to_amdgpu_dm_connector(connector);
8034 	int i;
8035 	int n;
8036 	struct mode_size {
8037 		char name[DRM_DISPLAY_MODE_LEN];
8038 		int w;
8039 		int h;
8040 	} common_modes[] = {
8041 		{  "640x480",  640,  480},
8042 		{  "800x600",  800,  600},
8043 		{ "1024x768", 1024,  768},
8044 		{ "1280x720", 1280,  720},
8045 		{ "1280x800", 1280,  800},
8046 		{"1280x1024", 1280, 1024},
8047 		{ "1440x900", 1440,  900},
8048 		{"1680x1050", 1680, 1050},
8049 		{"1600x1200", 1600, 1200},
8050 		{"1920x1080", 1920, 1080},
8051 		{"1920x1200", 1920, 1200}
8052 	};
8053 
8054 	n = ARRAY_SIZE(common_modes);
8055 
8056 	for (i = 0; i < n; i++) {
8057 		struct drm_display_mode *curmode = NULL;
8058 		bool mode_existed = false;
8059 
8060 		if (common_modes[i].w > native_mode->hdisplay ||
8061 		    common_modes[i].h > native_mode->vdisplay ||
8062 		   (common_modes[i].w == native_mode->hdisplay &&
8063 		    common_modes[i].h == native_mode->vdisplay))
8064 			continue;
8065 
8066 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8067 			if (common_modes[i].w == curmode->hdisplay &&
8068 			    common_modes[i].h == curmode->vdisplay) {
8069 				mode_existed = true;
8070 				break;
8071 			}
8072 		}
8073 
8074 		if (mode_existed)
8075 			continue;
8076 
8077 		mode = amdgpu_dm_create_common_mode(encoder,
8078 				common_modes[i].name, common_modes[i].w,
8079 				common_modes[i].h);
8080 		drm_mode_probed_add(connector, mode);
8081 		amdgpu_dm_connector->num_modes++;
8082 	}
8083 }
8084 
8085 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8086 {
8087 	struct drm_encoder *encoder;
8088 	struct amdgpu_encoder *amdgpu_encoder;
8089 	const struct drm_display_mode *native_mode;
8090 
8091 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8092 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8093 		return;
8094 
8095 	encoder = amdgpu_dm_connector_to_encoder(connector);
8096 	if (!encoder)
8097 		return;
8098 
8099 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8100 
8101 	native_mode = &amdgpu_encoder->native_mode;
8102 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8103 		return;
8104 
8105 	drm_connector_set_panel_orientation_with_quirk(connector,
8106 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8107 						       native_mode->hdisplay,
8108 						       native_mode->vdisplay);
8109 }
8110 
8111 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8112 					      struct edid *edid)
8113 {
8114 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8115 			to_amdgpu_dm_connector(connector);
8116 
8117 	if (edid) {
8118 		/* empty probed_modes */
8119 		INIT_LIST_HEAD(&connector->probed_modes);
8120 		amdgpu_dm_connector->num_modes =
8121 				drm_add_edid_modes(connector, edid);
8122 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes later in the
		 * probed mode list could be of a higher preferred
		 * resolution; for example, 3840x2160 in the base EDID
		 * preferred timing and 4096x2160 in a later DID
		 * extension block.
		 */
8131 		drm_mode_sort(&connector->probed_modes);
8132 		amdgpu_dm_get_native_mode(connector);
8133 
8134 		/* Freesync capabilities are reset by calling
8135 		 * drm_add_edid_modes() and need to be
8136 		 * restored here.
8137 		 */
8138 		amdgpu_dm_update_freesync_caps(connector, edid);
8139 
8140 		amdgpu_set_panel_orientation(connector);
8141 	} else {
8142 		amdgpu_dm_connector->num_modes = 0;
8143 	}
8144 }
8145 
8146 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8147 			      struct drm_display_mode *mode)
8148 {
8149 	struct drm_display_mode *m;
8150 
	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8152 		if (drm_mode_equal(m, mode))
8153 			return true;
8154 	}
8155 
8156 	return false;
8157 }
8158 
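/*
 * Synthesize additional fixed-refresh modes inside the connector's FreeSync
 * range by stretching the vertical total of the highest-refresh probed mode
 * to hit common video rates (23.976 Hz up to 120 Hz). Returns the number of
 * modes added.
 */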
8159 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8160 {
8161 	const struct drm_display_mode *m;
8162 	struct drm_display_mode *new_mode;
8163 	uint i;
8164 	uint32_t new_modes_count = 0;
8165 
8166 	/* Standard FPS values
8167 	 *
8168 	 * 23.976       - TV/NTSC
8169 	 * 24 	        - Cinema
8170 	 * 25 	        - TV/PAL
8171 	 * 29.97        - TV/NTSC
8172 	 * 30 	        - TV/NTSC
8173 	 * 48 	        - Cinema HFR
8174 	 * 50 	        - TV/PAL
8175 	 * 60 	        - Commonly used
8176 	 * 48,72,96,120 - Multiples of 24
8177 	 */
8178 	static const uint32_t common_rates[] = {
8179 		23976, 24000, 25000, 29970, 30000,
8180 		48000, 50000, 60000, 72000, 96000, 120000
8181 	};
8182 
8183 	/*
8184 	 * Find mode with highest refresh rate with the same resolution
8185 	 * as the preferred mode. Some monitors report a preferred mode
8186 	 * with lower resolution than the highest refresh rate supported.
8187 	 */
8188 
8189 	m = get_highest_refresh_rate_mode(aconnector, true);
8190 	if (!m)
8191 		return 0;
8192 
8193 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8194 		uint64_t target_vtotal, target_vtotal_diff;
8195 		uint64_t num, den;
8196 
8197 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8198 			continue;
8199 
8200 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8201 		    common_rates[i] > aconnector->max_vfreq * 1000)
8202 			continue;
8203 
8204 		num = (unsigned long long)m->clock * 1000 * 1000;
8205 		den = common_rates[i] * (unsigned long long)m->htotal;
8206 		target_vtotal = div_u64(num, den);
8207 		target_vtotal_diff = target_vtotal - m->vtotal;
8208 
8209 		/* Check for illegal modes */
8210 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8211 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8212 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8213 			continue;
8214 
8215 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8216 		if (!new_mode)
8217 			goto out;
8218 
8219 		new_mode->vtotal += (u16)target_vtotal_diff;
8220 		new_mode->vsync_start += (u16)target_vtotal_diff;
8221 		new_mode->vsync_end += (u16)target_vtotal_diff;
8222 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8223 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8224 
8225 		if (!is_duplicate_mode(aconnector, new_mode)) {
8226 			drm_mode_probed_add(&aconnector->base, new_mode);
8227 			new_modes_count += 1;
8228 		} else
8229 			drm_mode_destroy(aconnector->base.dev, new_mode);
8230 	}
8231  out:
8232 	return new_modes_count;
8233 }
8234 
8235 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8236 						   struct edid *edid)
8237 {
8238 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8239 		to_amdgpu_dm_connector(connector);
8240 
8241 	if (!(amdgpu_freesync_vid_mode && edid))
8242 		return;
8243 
8244 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8245 		amdgpu_dm_connector->num_modes +=
8246 			add_fs_modes(amdgpu_dm_connector);
8247 }
8248 
8249 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8250 {
8251 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8252 			to_amdgpu_dm_connector(connector);
8253 	struct drm_encoder *encoder;
8254 	struct edid *edid = amdgpu_dm_connector->edid;
8255 
8256 	encoder = amdgpu_dm_connector_to_encoder(connector);
8257 
8258 	if (!drm_edid_is_valid(edid)) {
8259 		amdgpu_dm_connector->num_modes =
8260 				drm_add_modes_noedid(connector, 640, 480);
8261 	} else {
8262 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8263 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8264 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8265 	}
8266 	amdgpu_dm_fbc_init(connector);
8267 
8268 	return amdgpu_dm_connector->num_modes;
8269 }
8270 
8271 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8272 				     struct amdgpu_dm_connector *aconnector,
8273 				     int connector_type,
8274 				     struct dc_link *link,
8275 				     int link_index)
8276 {
8277 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8278 
8279 	/*
8280 	 * Some of the properties below require access to state, like bpc.
8281 	 * Allocate some default initial connector state with our reset helper.
8282 	 */
8283 	if (aconnector->base.funcs->reset)
8284 		aconnector->base.funcs->reset(&aconnector->base);
8285 
8286 	aconnector->connector_id = link_index;
8287 	aconnector->dc_link = link;
8288 	aconnector->base.interlace_allowed = false;
8289 	aconnector->base.doublescan_allowed = false;
8290 	aconnector->base.stereo_allowed = false;
8291 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8292 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8293 	aconnector->audio_inst = -1;
8294 	mutex_init(&aconnector->hpd_lock);
8295 
	/*
	 * Configure HPD hot plug support. The connector->polled default value
	 * is 0, which means HPD hot plug is not supported.
	 */
8300 	switch (connector_type) {
8301 	case DRM_MODE_CONNECTOR_HDMIA:
8302 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8303 		aconnector->base.ycbcr_420_allowed =
8304 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8305 		break;
8306 	case DRM_MODE_CONNECTOR_DisplayPort:
8307 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8308 		if (link->is_dig_mapping_flexible &&
8309 		    link->dc->res_pool->funcs->link_encs_assign) {
8310 			link->link_enc =
8311 				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8312 			if (!link->link_enc)
8313 				link->link_enc =
8314 					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8315 		}
8316 
8317 		if (link->link_enc)
8318 			aconnector->base.ycbcr_420_allowed =
8319 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8320 		break;
8321 	case DRM_MODE_CONNECTOR_DVID:
8322 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8323 		break;
8324 	default:
8325 		break;
8326 	}
8327 
8328 	drm_object_attach_property(&aconnector->base.base,
8329 				dm->ddev->mode_config.scaling_mode_property,
8330 				DRM_MODE_SCALE_NONE);
8331 
8332 	drm_object_attach_property(&aconnector->base.base,
8333 				adev->mode_info.underscan_property,
8334 				UNDERSCAN_OFF);
8335 	drm_object_attach_property(&aconnector->base.base,
8336 				adev->mode_info.underscan_hborder_property,
8337 				0);
8338 	drm_object_attach_property(&aconnector->base.base,
8339 				adev->mode_info.underscan_vborder_property,
8340 				0);
8341 
8342 	if (!aconnector->mst_port)
8343 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8344 
8345 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8346 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8347 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8348 
8349 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8350 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8351 		drm_object_attach_property(&aconnector->base.base,
8352 				adev->mode_info.abm_level_property, 0);
8353 	}
8354 
8355 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8356 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8357 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8358 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8359 
8360 		if (!aconnector->mst_port)
8361 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8362 
8363 #ifdef CONFIG_DRM_AMD_DC_HDCP
8364 		if (adev->dm.hdcp_workqueue)
8365 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8366 #endif
8367 	}
8368 }
8369 
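/*
 * Translate a set of Linux i2c_msg transfers into a single DC i2c_command
 * and submit it through the link's DDC service. Returns the number of
 * messages transferred on success, -EIO otherwise.
 */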
8370 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8371 			      struct i2c_msg *msgs, int num)
8372 {
8373 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8374 	struct ddc_service *ddc_service = i2c->ddc_service;
8375 	struct i2c_command cmd;
8376 	int i;
8377 	int result = -EIO;
8378 
8379 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8380 
8381 	if (!cmd.payloads)
8382 		return result;
8383 
8384 	cmd.number_of_payloads = num;
8385 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8386 	cmd.speed = 100;
8387 
8388 	for (i = 0; i < num; i++) {
8389 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8390 		cmd.payloads[i].address = msgs[i].addr;
8391 		cmd.payloads[i].length = msgs[i].len;
8392 		cmd.payloads[i].data = msgs[i].buf;
8393 	}
8394 
8395 	if (dc_submit_i2c(
8396 			ddc_service->ctx->dc,
8397 			ddc_service->ddc_pin->hw_info.ddc_channel,
8398 			&cmd))
8399 		result = num;
8400 
8401 	kfree(cmd.payloads);
8402 	return result;
8403 }
8404 
8405 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8406 {
8407 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8408 }
8409 
8410 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8411 	.master_xfer = amdgpu_dm_i2c_xfer,
8412 	.functionality = amdgpu_dm_i2c_func,
8413 };
8414 
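/*
 * Allocate and initialize an amdgpu i2c adapter wrapping the DC DDC service
 * for a link; the adapter is later registered via i2c_add_adapter() and
 * handed to drm_connector_init_with_ddc() so DDC/EDID transactions are
 * routed through DC.
 */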
8415 static struct amdgpu_i2c_adapter *
8416 create_i2c(struct ddc_service *ddc_service,
8417 	   int link_index,
8418 	   int *res)
8419 {
8420 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8421 	struct amdgpu_i2c_adapter *i2c;
8422 
8423 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8424 	if (!i2c)
8425 		return NULL;
8426 	i2c->base.owner = THIS_MODULE;
8427 	i2c->base.class = I2C_CLASS_DDC;
8428 	i2c->base.dev.parent = &adev->pdev->dev;
8429 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8430 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8431 	i2c_set_adapdata(&i2c->base, i2c);
8432 	i2c->ddc_service = ddc_service;
8433 	if (i2c->ddc_service->ddc_pin)
8434 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8435 
8436 	return i2c;
8437 }
8438 
8439 
8440 /*
8441  * Note: this function assumes that dc_link_detect() was called for the
8442  * dc_link which will be represented by this aconnector.
8443  */
8444 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8445 				    struct amdgpu_dm_connector *aconnector,
8446 				    uint32_t link_index,
8447 				    struct amdgpu_encoder *aencoder)
8448 {
8449 	int res = 0;
8450 	int connector_type;
8451 	struct dc *dc = dm->dc;
8452 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8453 	struct amdgpu_i2c_adapter *i2c;
8454 
8455 	link->priv = aconnector;
8456 
8457 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8458 
8459 	i2c = create_i2c(link->ddc, link->link_index, &res);
8460 	if (!i2c) {
8461 		DRM_ERROR("Failed to create i2c adapter data\n");
8462 		return -ENOMEM;
8463 	}
8464 
8465 	aconnector->i2c = i2c;
8466 	res = i2c_add_adapter(&i2c->base);
8467 
8468 	if (res) {
8469 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8470 		goto out_free;
8471 	}
8472 
8473 	connector_type = to_drm_connector_type(link->connector_signal);
8474 
8475 	res = drm_connector_init_with_ddc(
8476 			dm->ddev,
8477 			&aconnector->base,
8478 			&amdgpu_dm_connector_funcs,
8479 			connector_type,
8480 			&i2c->base);
8481 
8482 	if (res) {
8483 		DRM_ERROR("connector_init failed\n");
8484 		aconnector->connector_id = -1;
8485 		goto out_free;
8486 	}
8487 
8488 	drm_connector_helper_add(
8489 			&aconnector->base,
8490 			&amdgpu_dm_connector_helper_funcs);
8491 
8492 	amdgpu_dm_connector_init_helper(
8493 		dm,
8494 		aconnector,
8495 		connector_type,
8496 		link,
8497 		link_index);
8498 
8499 	drm_connector_attach_encoder(
8500 		&aconnector->base, &aencoder->base);
8501 
8502 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8503 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8504 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8505 
8506 out_free:
8507 	if (res) {
8508 		kfree(i2c);
8509 		aconnector->i2c = NULL;
8510 	}
8511 	return res;
8512 }
8513 
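/*
 * Build the encoder's possible_crtcs bitmask: one bit per CRTC, e.g. 0xf
 * for four CRTCs, capped at six.
 */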
8514 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8515 {
8516 	switch (adev->mode_info.num_crtc) {
8517 	case 1:
8518 		return 0x1;
8519 	case 2:
8520 		return 0x3;
8521 	case 3:
8522 		return 0x7;
8523 	case 4:
8524 		return 0xf;
8525 	case 5:
8526 		return 0x1f;
8527 	case 6:
8528 	default:
8529 		return 0x3f;
8530 	}
8531 }
8532 
8533 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8534 				  struct amdgpu_encoder *aencoder,
8535 				  uint32_t link_index)
8536 {
8537 	struct amdgpu_device *adev = drm_to_adev(dev);
8538 
8539 	int res = drm_encoder_init(dev,
8540 				   &aencoder->base,
8541 				   &amdgpu_dm_encoder_funcs,
8542 				   DRM_MODE_ENCODER_TMDS,
8543 				   NULL);
8544 
8545 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8546 
8547 	if (!res)
8548 		aencoder->encoder_id = link_index;
8549 	else
8550 		aencoder->encoder_id = -1;
8551 
8552 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8553 
8554 	return res;
8555 }
8556 
8557 static void manage_dm_interrupts(struct amdgpu_device *adev,
8558 				 struct amdgpu_crtc *acrtc,
8559 				 bool enable)
8560 {
8561 	/*
8562 	 * We have no guarantee that the frontend index maps to the same
8563 	 * backend index - some even map to more than one.
8564 	 *
8565 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8566 	 */
8567 	int irq_type =
8568 		amdgpu_display_crtc_idx_to_irq_type(
8569 			adev,
8570 			acrtc->crtc_id);
8571 
8572 	if (enable) {
8573 		drm_crtc_vblank_on(&acrtc->base);
8574 		amdgpu_irq_get(
8575 			adev,
8576 			&adev->pageflip_irq,
8577 			irq_type);
8578 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8579 		amdgpu_irq_get(
8580 			adev,
8581 			&adev->vline0_irq,
8582 			irq_type);
8583 #endif
8584 	} else {
8585 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8586 		amdgpu_irq_put(
8587 			adev,
8588 			&adev->vline0_irq,
8589 			irq_type);
8590 #endif
8591 		amdgpu_irq_put(
8592 			adev,
8593 			&adev->pageflip_irq,
8594 			irq_type);
8595 		drm_crtc_vblank_off(&acrtc->base);
8596 	}
8597 }
8598 
8599 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8600 				      struct amdgpu_crtc *acrtc)
8601 {
8602 	int irq_type =
8603 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8604 
8605 	/**
	 * This reads the current state for the IRQ and forcibly reapplies
8607 	 * the setting to hardware.
8608 	 */
8609 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8610 }
8611 
8612 static bool
8613 is_scaling_state_different(const struct dm_connector_state *dm_state,
8614 			   const struct dm_connector_state *old_dm_state)
8615 {
8616 	if (dm_state->scaling != old_dm_state->scaling)
8617 		return true;
8618 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8619 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8620 			return true;
8621 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8622 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8623 			return true;
8624 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8625 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8626 		return true;
8627 	return false;
8628 }
8629 
8630 #ifdef CONFIG_DRM_AMD_DC_HDCP
8631 static bool is_content_protection_different(struct drm_connector_state *state,
8632 					    const struct drm_connector_state *old_state,
8633 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8634 {
8635 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8636 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8637 
8638 	/* Handle: Type0/1 change */
8639 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8640 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8641 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8642 		return true;
8643 	}
8644 
	/* CP is being re-enabled, ignore this
8646 	 *
8647 	 * Handles:	ENABLED -> DESIRED
8648 	 */
8649 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8650 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8651 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8652 		return false;
8653 	}
8654 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8656 	 *
8657 	 * Handles:	UNDESIRED -> ENABLED
8658 	 */
8659 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8660 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8661 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8662 
8663 	/* Stream removed and re-enabled
8664 	 *
8665 	 * Can sometimes overlap with the HPD case,
8666 	 * thus set update_hdcp to false to avoid
8667 	 * setting HDCP multiple times.
8668 	 *
8669 	 * Handles:	DESIRED -> DESIRED (Special case)
8670 	 */
8671 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8672 		state->crtc && state->crtc->enabled &&
8673 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8674 		dm_con_state->update_hdcp = false;
8675 		return true;
8676 	}
8677 
8678 	/* Hot-plug, headless s3, dpms
8679 	 *
8680 	 * Only start HDCP if the display is connected/enabled.
8681 	 * update_hdcp flag will be set to false until the next
8682 	 * HPD comes in.
8683 	 *
8684 	 * Handles:	DESIRED -> DESIRED (Special case)
8685 	 */
8686 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8687 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8688 		dm_con_state->update_hdcp = false;
8689 		return true;
8690 	}
8691 
8692 	/*
8693 	 * Handles:	UNDESIRED -> UNDESIRED
8694 	 *		DESIRED -> DESIRED
8695 	 *		ENABLED -> ENABLED
8696 	 */
8697 	if (old_state->content_protection == state->content_protection)
8698 		return false;
8699 
8700 	/*
8701 	 * Handles:	UNDESIRED -> DESIRED
8702 	 *		DESIRED -> UNDESIRED
8703 	 *		ENABLED -> UNDESIRED
8704 	 */
8705 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8706 		return true;
8707 
8708 	/*
8709 	 * Handles:	DESIRED -> ENABLED
8710 	 */
8711 	return false;
8712 }
8713 
8714 #endif
8715 static void remove_stream(struct amdgpu_device *adev,
8716 			  struct amdgpu_crtc *acrtc,
8717 			  struct dc_stream_state *stream)
8718 {
	/*
	 * This is the update mode case: the stream is being removed from the
	 * CRTC, so clear its OTG association and mark it disabled.
	 */
8720 
8721 	acrtc->otg_inst = -1;
8722 	acrtc->enabled = false;
8723 }
8724 
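/*
 * Compute the DC cursor position for the plane state: reject cursors larger
 * than the hardware maximum, and for negative coordinates clamp the position
 * to the screen edge while shifting the hotspot so the visible part of the
 * cursor is still drawn correctly.
 */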
8725 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8726 			       struct dc_cursor_position *position)
8727 {
8728 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8729 	int x, y;
8730 	int xorigin = 0, yorigin = 0;
8731 
8732 	if (!crtc || !plane->state->fb)
8733 		return 0;
8734 
8735 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8736 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8737 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8738 			  __func__,
8739 			  plane->state->crtc_w,
8740 			  plane->state->crtc_h);
8741 		return -EINVAL;
8742 	}
8743 
8744 	x = plane->state->crtc_x;
8745 	y = plane->state->crtc_y;
8746 
8747 	if (x <= -amdgpu_crtc->max_cursor_width ||
8748 	    y <= -amdgpu_crtc->max_cursor_height)
8749 		return 0;
8750 
8751 	if (x < 0) {
8752 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8753 		x = 0;
8754 	}
8755 	if (y < 0) {
8756 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8757 		y = 0;
8758 	}
8759 	position->enable = true;
8760 	position->translate_by_source = true;
8761 	position->x = x;
8762 	position->y = y;
8763 	position->x_hotspot = xorigin;
8764 	position->y_hotspot = yorigin;
8765 
8766 	return 0;
8767 }
8768 
8769 static void handle_cursor_update(struct drm_plane *plane,
8770 				 struct drm_plane_state *old_plane_state)
8771 {
8772 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8773 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8774 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8775 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8776 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8777 	uint64_t address = afb ? afb->address : 0;
8778 	struct dc_cursor_position position = {0};
8779 	struct dc_cursor_attributes attributes;
8780 	int ret;
8781 
8782 	if (!plane->state->fb && !old_plane_state->fb)
8783 		return;
8784 
	DC_LOG_CURSOR("%s: crtc_id=%d with size %d x %d\n",
8786 		      __func__,
8787 		      amdgpu_crtc->crtc_id,
8788 		      plane->state->crtc_w,
8789 		      plane->state->crtc_h);
8790 
8791 	ret = get_cursor_position(plane, crtc, &position);
8792 	if (ret)
8793 		return;
8794 
8795 	if (!position.enable) {
8796 		/* turn off cursor */
8797 		if (crtc_state && crtc_state->stream) {
8798 			mutex_lock(&adev->dm.dc_lock);
8799 			dc_stream_set_cursor_position(crtc_state->stream,
8800 						      &position);
8801 			mutex_unlock(&adev->dm.dc_lock);
8802 		}
8803 		return;
8804 	}
8805 
8806 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8807 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8808 
8809 	memset(&attributes, 0, sizeof(attributes));
8810 	attributes.address.high_part = upper_32_bits(address);
8811 	attributes.address.low_part  = lower_32_bits(address);
8812 	attributes.width             = plane->state->crtc_w;
8813 	attributes.height            = plane->state->crtc_h;
8814 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8815 	attributes.rotation_angle    = 0;
8816 	attributes.attribute_flags.value = 0;
8817 
8818 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8819 
8820 	if (crtc_state->stream) {
8821 		mutex_lock(&adev->dm.dc_lock);
8822 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8823 							 &attributes))
8824 			DRM_ERROR("DC failed to set cursor attributes\n");
8825 
8826 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8827 						   &position))
8828 			DRM_ERROR("DC failed to set cursor position\n");
8829 		mutex_unlock(&adev->dm.dc_lock);
8830 	}
8831 }
8832 
8833 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8834 {
8835 
8836 	assert_spin_locked(&acrtc->base.dev->event_lock);
8837 	WARN_ON(acrtc->event);
8838 
8839 	acrtc->event = acrtc->base.state->event;
8840 
8841 	/* Set the flip status */
8842 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8843 
8844 	/* Mark this event as consumed */
8845 	acrtc->base.state->event = NULL;
8846 
8847 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8848 		     acrtc->crtc_id);
8849 }
8850 
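/*
 * Recompute the VRR parameters and the VRR infopacket for a stream on page
 * flip and note whether the timing or the infopacket changed. The copies in
 * dm_irq_params are updated under the DRM device's event_lock so the IRQ
 * handlers always see a consistent snapshot.
 */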
8851 static void update_freesync_state_on_stream(
8852 	struct amdgpu_display_manager *dm,
8853 	struct dm_crtc_state *new_crtc_state,
8854 	struct dc_stream_state *new_stream,
8855 	struct dc_plane_state *surface,
8856 	u32 flip_timestamp_in_us)
8857 {
8858 	struct mod_vrr_params vrr_params;
8859 	struct dc_info_packet vrr_infopacket = {0};
8860 	struct amdgpu_device *adev = dm->adev;
8861 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8862 	unsigned long flags;
8863 	bool pack_sdp_v1_3 = false;
8864 
8865 	if (!new_stream)
8866 		return;
8867 
8868 	/*
8869 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8870 	 * For now it's sufficient to just guard against these conditions.
8871 	 */
8872 
8873 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8874 		return;
8875 
8876 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8878 
8879 	if (surface) {
8880 		mod_freesync_handle_preflip(
8881 			dm->freesync_module,
8882 			surface,
8883 			new_stream,
8884 			flip_timestamp_in_us,
8885 			&vrr_params);
8886 
8887 		if (adev->family < AMDGPU_FAMILY_AI &&
8888 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8889 			mod_freesync_handle_v_update(dm->freesync_module,
8890 						     new_stream, &vrr_params);
8891 
8892 			/* Need to call this before the frame ends. */
8893 			dc_stream_adjust_vmin_vmax(dm->dc,
8894 						   new_crtc_state->stream,
8895 						   &vrr_params.adjust);
8896 		}
8897 	}
8898 
8899 	mod_freesync_build_vrr_infopacket(
8900 		dm->freesync_module,
8901 		new_stream,
8902 		&vrr_params,
8903 		PACKET_TYPE_VRR,
8904 		TRANSFER_FUNC_UNKNOWN,
8905 		&vrr_infopacket,
8906 		pack_sdp_v1_3);
8907 
8908 	new_crtc_state->freesync_timing_changed |=
8909 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8910 			&vrr_params.adjust,
8911 			sizeof(vrr_params.adjust)) != 0);
8912 
8913 	new_crtc_state->freesync_vrr_info_changed |=
8914 		(memcmp(&new_crtc_state->vrr_infopacket,
8915 			&vrr_infopacket,
8916 			sizeof(vrr_infopacket)) != 0);
8917 
8918 	acrtc->dm_irq_params.vrr_params = vrr_params;
8919 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8920 
8921 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8922 	new_stream->vrr_infopacket = vrr_infopacket;
8923 
8924 	if (new_crtc_state->freesync_vrr_info_changed)
8925 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8926 			      new_crtc_state->base.crtc->base.id,
8927 			      (int)new_crtc_state->base.vrr_enabled,
8928 			      (int)vrr_params.state);
8929 
8930 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8931 }
8932 
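/*
 * Derive the freesync/VRR configuration for the new CRTC state and copy it,
 * together with the resulting VRR parameters and active plane count, into
 * the CRTC's dm_irq_params for use by the vblank/vupdate IRQ handlers.
 */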
8933 static void update_stream_irq_parameters(
8934 	struct amdgpu_display_manager *dm,
8935 	struct dm_crtc_state *new_crtc_state)
8936 {
8937 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8938 	struct mod_vrr_params vrr_params;
8939 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8940 	struct amdgpu_device *adev = dm->adev;
8941 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8942 	unsigned long flags;
8943 
8944 	if (!new_stream)
8945 		return;
8946 
8947 	/*
8948 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8949 	 * For now it's sufficient to just guard against these conditions.
8950 	 */
8951 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8952 		return;
8953 
8954 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8955 	vrr_params = acrtc->dm_irq_params.vrr_params;
8956 
8957 	if (new_crtc_state->vrr_supported &&
8958 	    config.min_refresh_in_uhz &&
8959 	    config.max_refresh_in_uhz) {
8960 		/*
8961 		 * if freesync compatible mode was set, config.state will be set
8962 		 * in atomic check
8963 		 */
8964 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8965 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8966 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8967 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8968 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8969 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8970 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8971 		} else {
8972 			config.state = new_crtc_state->base.vrr_enabled ?
8973 						     VRR_STATE_ACTIVE_VARIABLE :
8974 						     VRR_STATE_INACTIVE;
8975 		}
8976 	} else {
8977 		config.state = VRR_STATE_UNSUPPORTED;
8978 	}
8979 
8980 	mod_freesync_build_vrr_params(dm->freesync_module,
8981 				      new_stream,
8982 				      &config, &vrr_params);
8983 
8984 	new_crtc_state->freesync_timing_changed |=
8985 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8986 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8987 
8988 	new_crtc_state->freesync_config = config;
8989 	/* Copy state for access from DM IRQ handler */
8990 	acrtc->dm_irq_params.freesync_config = config;
8991 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8992 	acrtc->dm_irq_params.vrr_params = vrr_params;
8993 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8994 }
8995 
8996 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8997 					    struct dm_crtc_state *new_state)
8998 {
8999 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9000 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9001 
9002 	if (!old_vrr_active && new_vrr_active) {
9003 		/* Transition VRR inactive -> active:
9004 		 * While VRR is active, we must not disable vblank irq, as a
		 * re-enable after disable would compute bogus vblank/pflip
		 * timestamps, as it would likely happen inside the display front-porch.
9007 		 *
9008 		 * We also need vupdate irq for the actual core vblank handling
9009 		 * at end of vblank.
9010 		 */
9011 		dm_set_vupdate_irq(new_state->base.crtc, true);
9012 		drm_crtc_vblank_get(new_state->base.crtc);
9013 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9014 				 __func__, new_state->base.crtc->base.id);
9015 	} else if (old_vrr_active && !new_vrr_active) {
9016 		/* Transition VRR active -> inactive:
9017 		 * Allow vblank irq disable again for fixed refresh rate.
9018 		 */
9019 		dm_set_vupdate_irq(new_state->base.crtc, false);
9020 		drm_crtc_vblank_put(new_state->base.crtc);
9021 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9022 				 __func__, new_state->base.crtc->base.id);
9023 	}
9024 }
9025 
9026 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9027 {
9028 	struct drm_plane *plane;
9029 	struct drm_plane_state *old_plane_state;
9030 	int i;
9031 
9032 	/*
9033 	 * TODO: Make this per-stream so we don't issue redundant updates for
9034 	 * commits with multiple streams.
9035 	 */
9036 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9037 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9038 			handle_cursor_update(plane, old_plane_state);
9039 }
9040 
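/*
 * Program all plane updates for a single CRTC: build a bundle of surface,
 * plane info, scaling and flip address updates, throttle page flips against
 * the target vblank, arm the pageflip event, and hand everything to DC in
 * one dc_commit_updates_for_stream() call, managing PSR state around the
 * update as needed.
 */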
9041 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9042 				    struct dc_state *dc_state,
9043 				    struct drm_device *dev,
9044 				    struct amdgpu_display_manager *dm,
9045 				    struct drm_crtc *pcrtc,
9046 				    bool wait_for_vblank)
9047 {
9048 	uint32_t i;
9049 	uint64_t timestamp_ns;
9050 	struct drm_plane *plane;
9051 	struct drm_plane_state *old_plane_state, *new_plane_state;
9052 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9053 	struct drm_crtc_state *new_pcrtc_state =
9054 			drm_atomic_get_new_crtc_state(state, pcrtc);
9055 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9056 	struct dm_crtc_state *dm_old_crtc_state =
9057 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9058 	int planes_count = 0, vpos, hpos;
9059 	long r;
9060 	unsigned long flags;
9061 	struct amdgpu_bo *abo;
9062 	uint32_t target_vblank, last_flip_vblank;
9063 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9064 	bool pflip_present = false;
9065 	struct {
9066 		struct dc_surface_update surface_updates[MAX_SURFACES];
9067 		struct dc_plane_info plane_infos[MAX_SURFACES];
9068 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9069 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9070 		struct dc_stream_update stream_update;
9071 	} *bundle;
9072 
9073 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9074 
9075 	if (!bundle) {
9076 		dm_error("Failed to allocate update bundle\n");
9077 		goto cleanup;
9078 	}
9079 
9080 	/*
9081 	 * Disable the cursor first if we're disabling all the planes.
9082 	 * It'll remain on the screen after the planes are re-enabled
9083 	 * if we don't.
9084 	 */
9085 	if (acrtc_state->active_planes == 0)
9086 		amdgpu_dm_commit_cursors(state);
9087 
9088 	/* update planes when needed */
9089 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9090 		struct drm_crtc *crtc = new_plane_state->crtc;
9091 		struct drm_crtc_state *new_crtc_state;
9092 		struct drm_framebuffer *fb = new_plane_state->fb;
9093 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9094 		bool plane_needs_flip;
9095 		struct dc_plane_state *dc_plane;
9096 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9097 
9098 		/* Cursor plane is handled after stream updates */
9099 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9100 			continue;
9101 
9102 		if (!fb || !crtc || pcrtc != crtc)
9103 			continue;
9104 
9105 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9106 		if (!new_crtc_state->active)
9107 			continue;
9108 
9109 		dc_plane = dm_new_plane_state->dc_state;
9110 
9111 		bundle->surface_updates[planes_count].surface = dc_plane;
9112 		if (new_pcrtc_state->color_mgmt_changed) {
9113 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9114 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9115 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9116 		}
9117 
9118 		fill_dc_scaling_info(dm->adev, new_plane_state,
9119 				     &bundle->scaling_infos[planes_count]);
9120 
9121 		bundle->surface_updates[planes_count].scaling_info =
9122 			&bundle->scaling_infos[planes_count];
9123 
9124 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9125 
9126 		pflip_present = pflip_present || plane_needs_flip;
9127 
9128 		if (!plane_needs_flip) {
9129 			planes_count += 1;
9130 			continue;
9131 		}
9132 
9133 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9134 
9135 		/*
		 * Wait for all fences on this FB. Use a limited wait to avoid
		 * a deadlock during GPU reset, when the fence may never signal
		 * while we hold the reservation lock for the BO.
9139 		 */
9140 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9141 					  msecs_to_jiffies(5000));
9142 		if (unlikely(r <= 0))
9143 			DRM_ERROR("Waiting for fences timed out!");
9144 
9145 		fill_dc_plane_info_and_addr(
9146 			dm->adev, new_plane_state,
9147 			afb->tiling_flags,
9148 			&bundle->plane_infos[planes_count],
9149 			&bundle->flip_addrs[planes_count].address,
9150 			afb->tmz_surface, false);
9151 
9152 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9153 				 new_plane_state->plane->index,
9154 				 bundle->plane_infos[planes_count].dcc.enable);
9155 
9156 		bundle->surface_updates[planes_count].plane_info =
9157 			&bundle->plane_infos[planes_count];
9158 
9159 		/*
9160 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
9162 		 */
9163 		bundle->flip_addrs[planes_count].flip_immediate =
9164 			crtc->state->async_flip &&
9165 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9166 
9167 		timestamp_ns = ktime_get_ns();
9168 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9169 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9170 		bundle->surface_updates[planes_count].surface = dc_plane;
9171 
9172 		if (!bundle->surface_updates[planes_count].surface) {
9173 			DRM_ERROR("No surface for CRTC: id=%d\n",
9174 					acrtc_attach->crtc_id);
9175 			continue;
9176 		}
9177 
9178 		if (plane == pcrtc->primary)
9179 			update_freesync_state_on_stream(
9180 				dm,
9181 				acrtc_state,
9182 				acrtc_state->stream,
9183 				dc_plane,
9184 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9185 
9186 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9187 				 __func__,
9188 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9189 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9190 
9191 		planes_count += 1;
9192 
9193 	}
9194 
9195 	if (pflip_present) {
9196 		if (!vrr_active) {
9197 			/* Use old throttling in non-vrr fixed refresh rate mode
9198 			 * to keep flip scheduling based on target vblank counts
9199 			 * working in a backwards compatible way, e.g., for
9200 			 * clients using the GLX_OML_sync_control extension or
9201 			 * DRI3/Present extension with defined target_msc.
9202 			 */
9203 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
9206 			/* For variable refresh rate mode only:
9207 			 * Get vblank of last completed flip to avoid > 1 vrr
9208 			 * flips per video frame by use of throttling, but allow
9209 			 * flip programming anywhere in the possibly large
9210 			 * variable vrr vblank interval for fine-grained flip
9211 			 * timing control and more opportunity to avoid stutter
9212 			 * on late submission of flips.
9213 			 */
9214 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9215 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9216 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9217 		}
9218 
9219 		target_vblank = last_flip_vblank + wait_for_vblank;
9220 
9221 		/*
9222 		 * Wait until we're out of the vertical blank period before the one
9223 		 * targeted by the flip
9224 		 */
9225 		while ((acrtc_attach->enabled &&
9226 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9227 							    0, &vpos, &hpos, NULL,
9228 							    NULL, &pcrtc->hwmode)
9229 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9230 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9231 			(int)(target_vblank -
9232 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9233 			usleep_range(1000, 1100);
9234 		}
9235 
9236 		/**
9237 		 * Prepare the flip event for the pageflip interrupt to handle.
9238 		 *
9239 		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
9241 		 * from 0 -> n planes we have to skip a hardware generated event
9242 		 * and rely on sending it from software.
9243 		 */
9244 		if (acrtc_attach->base.state->event &&
9245 		    acrtc_state->active_planes > 0 &&
9246 		    !acrtc_state->force_dpms_off) {
9247 			drm_crtc_vblank_get(pcrtc);
9248 
9249 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9250 
9251 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9252 			prepare_flip_isr(acrtc_attach);
9253 
9254 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9255 		}
9256 
9257 		if (acrtc_state->stream) {
9258 			if (acrtc_state->freesync_vrr_info_changed)
9259 				bundle->stream_update.vrr_infopacket =
9260 					&acrtc_state->stream->vrr_infopacket;
9261 		}
9262 	}
9263 
9264 	/* Update the planes if changed or disable if we don't have any. */
9265 	if ((planes_count || acrtc_state->active_planes == 0) &&
9266 		acrtc_state->stream) {
9267 #if defined(CONFIG_DRM_AMD_DC_DCN)
9268 		/*
9269 		 * If PSR or idle optimizations are enabled then flush out
9270 		 * any pending work before hardware programming.
9271 		 */
9272 		if (dm->vblank_control_workqueue)
9273 			flush_workqueue(dm->vblank_control_workqueue);
9274 #endif
9275 
9276 		bundle->stream_update.stream = acrtc_state->stream;
9277 		if (new_pcrtc_state->mode_changed) {
9278 			bundle->stream_update.src = acrtc_state->stream->src;
9279 			bundle->stream_update.dst = acrtc_state->stream->dst;
9280 		}
9281 
9282 		if (new_pcrtc_state->color_mgmt_changed) {
9283 			/*
9284 			 * TODO: This isn't fully correct since we've actually
9285 			 * already modified the stream in place.
9286 			 */
9287 			bundle->stream_update.gamut_remap =
9288 				&acrtc_state->stream->gamut_remap_matrix;
9289 			bundle->stream_update.output_csc_transform =
9290 				&acrtc_state->stream->csc_color_matrix;
9291 			bundle->stream_update.out_transfer_func =
9292 				acrtc_state->stream->out_transfer_func;
9293 		}
9294 
9295 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9296 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9297 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9298 
9299 		/*
9300 		 * If FreeSync state on the stream has changed then we need to
9301 		 * re-adjust the min/max bounds now that DC doesn't handle this
9302 		 * as part of commit.
9303 		 */
9304 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9305 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9306 			dc_stream_adjust_vmin_vmax(
9307 				dm->dc, acrtc_state->stream,
9308 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9309 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9310 		}
9311 		mutex_lock(&dm->dc_lock);
9312 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9313 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9314 			amdgpu_dm_psr_disable(acrtc_state->stream);
9315 
9316 		dc_commit_updates_for_stream(dm->dc,
9317 						     bundle->surface_updates,
9318 						     planes_count,
9319 						     acrtc_state->stream,
9320 						     &bundle->stream_update,
9321 						     dc_state);
9322 
9323 		/**
9324 		 * Enable or disable the interrupts on the backend.
9325 		 *
9326 		 * Most pipes are put into power gating when unused.
9327 		 *
		 * While a pipe is power gated we lose its interrupt enablement
		 * state, and it is not restored when the pipe comes back up.
9330 		 *
9331 		 * So we need to update the IRQ control state in hardware
9332 		 * whenever the pipe turns on (since it could be previously
9333 		 * power gated) or off (since some pipes can't be power gated
9334 		 * on some ASICs).
9335 		 */
9336 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9337 			dm_update_pflip_irq_state(drm_to_adev(dev),
9338 						  acrtc_attach);
9339 
9340 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9341 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9342 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9343 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9344 
9345 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9346 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9347 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9348 			struct amdgpu_dm_connector *aconn =
9349 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9350 
9351 			if (aconn->psr_skip_count > 0)
9352 				aconn->psr_skip_count--;
9353 
9354 			/* Allow PSR when skip count is 0. */
9355 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9356 		} else {
9357 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9358 		}
9359 
9360 		mutex_unlock(&dm->dc_lock);
9361 	}
9362 
9363 	/*
9364 	 * Update cursor state *after* programming all the planes.
9365 	 * This avoids redundant programming in the case where we're going
9366 	 * to be disabling a single plane - those pipes are being disabled.
9367 	 */
9368 	if (acrtc_state->active_planes)
9369 		amdgpu_dm_commit_cursors(state);
9370 
9371 cleanup:
9372 	kfree(bundle);
9373 }
9374 
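/*
 * Notify the audio component about connectors whose CRTC binding was removed
 * or changed, and about connectors that gained an active stream, so audio
 * endpoint (ELD) state tracks the new display configuration.
 */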
9375 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9376 				   struct drm_atomic_state *state)
9377 {
9378 	struct amdgpu_device *adev = drm_to_adev(dev);
9379 	struct amdgpu_dm_connector *aconnector;
9380 	struct drm_connector *connector;
9381 	struct drm_connector_state *old_con_state, *new_con_state;
9382 	struct drm_crtc_state *new_crtc_state;
9383 	struct dm_crtc_state *new_dm_crtc_state;
9384 	const struct dc_stream_status *status;
9385 	int i, inst;
9386 
9387 	/* Notify device removals. */
9388 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9389 		if (old_con_state->crtc != new_con_state->crtc) {
9390 			/* CRTC changes require notification. */
9391 			goto notify;
9392 		}
9393 
9394 		if (!new_con_state->crtc)
9395 			continue;
9396 
9397 		new_crtc_state = drm_atomic_get_new_crtc_state(
9398 			state, new_con_state->crtc);
9399 
9400 		if (!new_crtc_state)
9401 			continue;
9402 
9403 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9404 			continue;
9405 
9406 	notify:
9407 		aconnector = to_amdgpu_dm_connector(connector);
9408 
9409 		mutex_lock(&adev->dm.audio_lock);
9410 		inst = aconnector->audio_inst;
9411 		aconnector->audio_inst = -1;
9412 		mutex_unlock(&adev->dm.audio_lock);
9413 
9414 		amdgpu_dm_audio_eld_notify(adev, inst);
9415 	}
9416 
9417 	/* Notify audio device additions. */
9418 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9419 		if (!new_con_state->crtc)
9420 			continue;
9421 
9422 		new_crtc_state = drm_atomic_get_new_crtc_state(
9423 			state, new_con_state->crtc);
9424 
9425 		if (!new_crtc_state)
9426 			continue;
9427 
9428 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9429 			continue;
9430 
9431 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9432 		if (!new_dm_crtc_state->stream)
9433 			continue;
9434 
9435 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9436 		if (!status)
9437 			continue;
9438 
9439 		aconnector = to_amdgpu_dm_connector(connector);
9440 
9441 		mutex_lock(&adev->dm.audio_lock);
9442 		inst = status->audio_inst;
9443 		aconnector->audio_inst = inst;
9444 		mutex_unlock(&adev->dm.audio_lock);
9445 
9446 		amdgpu_dm_audio_eld_notify(adev, inst);
9447 	}
9448 }
9449 
/**
9451  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9452  * @crtc_state: the DRM CRTC state
9453  * @stream_state: the DC stream state.
9454  *
9455  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9456  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9457  */
9458 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9459 						struct dc_stream_state *stream_state)
9460 {
9461 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9462 }
9463 
9464 /**
9465  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9466  * @state: The atomic state to commit
9467  *
9468  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
9470  * atomic check should have filtered anything non-kosher.
9471  */
9472 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9473 {
9474 	struct drm_device *dev = state->dev;
9475 	struct amdgpu_device *adev = drm_to_adev(dev);
9476 	struct amdgpu_display_manager *dm = &adev->dm;
9477 	struct dm_atomic_state *dm_state;
9478 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9479 	uint32_t i, j;
9480 	struct drm_crtc *crtc;
9481 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9482 	unsigned long flags;
9483 	bool wait_for_vblank = true;
9484 	struct drm_connector *connector;
9485 	struct drm_connector_state *old_con_state, *new_con_state;
9486 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9487 	int crtc_disable_count = 0;
9488 	bool mode_set_reset_required = false;
9489 
9490 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9491 
9492 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9493 
9494 	dm_state = dm_atomic_get_new_state(state);
9495 	if (dm_state && dm_state->context) {
9496 		dc_state = dm_state->context;
9497 	} else {
9498 		/* No state changes, retain current state. */
9499 		dc_state_temp = dc_create_state(dm->dc);
9500 		ASSERT(dc_state_temp);
9501 		dc_state = dc_state_temp;
9502 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9503 	}
9504 
9505 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9506 				       new_crtc_state, i) {
9507 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9508 
9509 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9510 
9511 		if (old_crtc_state->active &&
9512 		    (!new_crtc_state->active ||
9513 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9514 			manage_dm_interrupts(adev, acrtc, false);
9515 			dc_stream_release(dm_old_crtc_state->stream);
9516 		}
9517 	}
9518 
9519 	drm_atomic_helper_calc_timestamping_constants(state);
9520 
9521 	/* update changed items */
9522 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9523 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9524 
9525 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9526 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9527 
9528 		DRM_DEBUG_ATOMIC(
9529 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9531 			"connectors_changed:%d\n",
9532 			acrtc->crtc_id,
9533 			new_crtc_state->enable,
9534 			new_crtc_state->active,
9535 			new_crtc_state->planes_changed,
9536 			new_crtc_state->mode_changed,
9537 			new_crtc_state->active_changed,
9538 			new_crtc_state->connectors_changed);
9539 
9540 		/* Disable cursor if disabling crtc */
9541 		if (old_crtc_state->active && !new_crtc_state->active) {
9542 			struct dc_cursor_position position;
9543 
9544 			memset(&position, 0, sizeof(position));
9545 			mutex_lock(&dm->dc_lock);
9546 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9547 			mutex_unlock(&dm->dc_lock);
9548 		}
9549 
9550 		/* Copy all transient state flags into dc state */
9551 		if (dm_new_crtc_state->stream) {
9552 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9553 							    dm_new_crtc_state->stream);
9554 		}
9555 
9556 		/* handles headless hotplug case, updating new_state and
9557 		 * aconnector as needed
9558 		 */
9559 
9560 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9561 
9562 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9563 
9564 			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen due to issues with delivery
				 * of userspace notifications: userspace tries
				 * to set a mode on a display that is in fact
				 * disconnected (dc_sink is NULL on the
				 * aconnector), and a mode reset is expected to
				 * follow shortly.
				 *
				 * It can also happen when an unplug occurs
				 * while the resume sequence is still running.
				 *
				 * In both cases we want to pretend we still
				 * have a sink, to keep the pipe running so
				 * that hw state stays consistent with the sw
				 * state.
				 */
9580 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9581 						__func__, acrtc->base.base.id);
9582 				continue;
9583 			}
9584 
9585 			if (dm_old_crtc_state->stream)
9586 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9587 
9588 			pm_runtime_get_noresume(dev->dev);
9589 
9590 			acrtc->enabled = true;
9591 			acrtc->hw_mode = new_crtc_state->mode;
9592 			crtc->hwmode = new_crtc_state->mode;
9593 			mode_set_reset_required = true;
9594 		} else if (modereset_required(new_crtc_state)) {
9595 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9596 			/* i.e. reset mode */
9597 			if (dm_old_crtc_state->stream)
9598 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9599 
9600 			mode_set_reset_required = true;
9601 		}
9602 	} /* for_each_crtc_in_state() */
9603 
9604 	if (dc_state) {
		/* If there is a mode set or reset, disable eDP PSR. */
9606 		if (mode_set_reset_required) {
9607 #if defined(CONFIG_DRM_AMD_DC_DCN)
9608 			if (dm->vblank_control_workqueue)
9609 				flush_workqueue(dm->vblank_control_workqueue);
9610 #endif
9611 			amdgpu_dm_psr_disable_all(dm);
9612 		}
9613 
9614 		dm_enable_per_frame_crtc_master_sync(dc_state);
9615 		mutex_lock(&dm->dc_lock);
9616 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9617 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
9621 #endif
9622 		mutex_unlock(&dm->dc_lock);
9623 	}
9624 
9625 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9626 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9627 
9628 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9629 
9630 		if (dm_new_crtc_state->stream != NULL) {
9631 			const struct dc_stream_status *status =
9632 					dc_stream_get_status(dm_new_crtc_state->stream);
9633 
9634 			if (!status)
9635 				status = dc_stream_get_status_from_state(dc_state,
9636 									 dm_new_crtc_state->stream);
9637 			if (!status)
9638 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9639 			else
9640 				acrtc->otg_inst = status->primary_otg_inst;
9641 		}
9642 	}
9643 #ifdef CONFIG_DRM_AMD_DC_HDCP
9644 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9645 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9646 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9647 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9648 
9649 		new_crtc_state = NULL;
9650 
9651 		if (acrtc)
9652 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9653 
9654 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9655 
9656 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9657 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9658 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9659 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9660 			dm_new_con_state->update_hdcp = true;
9661 			continue;
9662 		}
9663 
9664 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9665 			hdcp_update_display(
9666 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9667 				new_con_state->hdcp_content_type,
9668 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9669 	}
9670 #endif
9671 
9672 	/* Handle connector state changes */
9673 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9674 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9675 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9676 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9677 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9678 		struct dc_stream_update stream_update;
9679 		struct dc_info_packet hdr_packet;
9680 		struct dc_stream_status *status = NULL;
9681 		bool abm_changed, hdr_changed, scaling_changed;
9682 
9683 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9684 		memset(&stream_update, 0, sizeof(stream_update));
9685 
9686 		if (acrtc) {
9687 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9688 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9689 		}
9690 
9691 		/* Skip any modesets/resets */
9692 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9693 			continue;
9694 
9695 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9696 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9697 
9698 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9699 							     dm_old_con_state);
9700 
9701 		abm_changed = dm_new_crtc_state->abm_level !=
9702 			      dm_old_crtc_state->abm_level;
9703 
9704 		hdr_changed =
9705 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9706 
9707 		if (!scaling_changed && !abm_changed && !hdr_changed)
9708 			continue;
9709 
9710 		stream_update.stream = dm_new_crtc_state->stream;
9711 		if (scaling_changed) {
9712 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9713 					dm_new_con_state, dm_new_crtc_state->stream);
9714 
9715 			stream_update.src = dm_new_crtc_state->stream->src;
9716 			stream_update.dst = dm_new_crtc_state->stream->dst;
9717 		}
9718 
9719 		if (abm_changed) {
9720 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9721 
9722 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9723 		}
9724 
9725 		if (hdr_changed) {
9726 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9727 			stream_update.hdr_static_metadata = &hdr_packet;
9728 		}
9729 
9730 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9731 
9732 		if (WARN_ON(!status))
9733 			continue;
9734 
9735 		WARN_ON(!status->plane_count);
9736 
9737 		/*
9738 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9739 		 * Here we create an empty update on each plane.
9740 		 * To fix this, DC should permit updating only stream properties.
9741 		 */
9742 		for (j = 0; j < status->plane_count; j++)
9743 			dummy_updates[j].surface = status->plane_states[0];
9744 
9745 
9746 		mutex_lock(&dm->dc_lock);
9747 		dc_commit_updates_for_stream(dm->dc,
9748 						     dummy_updates,
9749 						     status->plane_count,
9750 						     dm_new_crtc_state->stream,
9751 						     &stream_update,
9752 						     dc_state);
9753 		mutex_unlock(&dm->dc_lock);
9754 	}
9755 
9756 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9757 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9758 				      new_crtc_state, i) {
9759 		if (old_crtc_state->active && !new_crtc_state->active)
9760 			crtc_disable_count++;
9761 
9762 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9763 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9764 
		/* Update the freesync config on the crtc state and the params used for irq handling */
9766 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9767 
9768 		/* Handle vrr on->off / off->on transitions */
9769 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9770 						dm_new_crtc_state);
9771 	}
9772 
9773 	/**
9774 	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This is intentionally deferred until after the front end
	 * state has been modified, so that the OTG is already on and the IRQ
	 * handlers do not access stale or invalid state.
9778 	 */
9779 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9780 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9781 #ifdef CONFIG_DEBUG_FS
9782 		bool configure_crc = false;
9783 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9784 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9785 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9786 #endif
9787 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9788 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9789 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9790 #endif
9791 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9792 
9793 		if (new_crtc_state->active &&
9794 		    (!old_crtc_state->active ||
9795 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9796 			dc_stream_retain(dm_new_crtc_state->stream);
9797 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9798 			manage_dm_interrupts(adev, acrtc, true);
9799 
9800 #ifdef CONFIG_DEBUG_FS
9801 			/**
9802 			 * Frontend may have changed so reapply the CRC capture
9803 			 * settings for the stream.
9804 			 */
9805 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9806 
9807 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9808 				configure_crc = true;
9809 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9810 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9811 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9812 					acrtc->dm_irq_params.crc_window.update_win = true;
9813 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9814 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9815 					crc_rd_wrk->crtc = crtc;
9816 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9817 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9818 				}
9819 #endif
9820 			}
9821 
9822 			if (configure_crc)
9823 				if (amdgpu_dm_crtc_configure_crc_source(
9824 					crtc, dm_new_crtc_state, cur_crc_src))
9825 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9826 #endif
9827 		}
9828 	}
9829 
9830 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9831 		if (new_crtc_state->async_flip)
9832 			wait_for_vblank = false;
9833 
9834 	/* update planes when needed per crtc*/
9835 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9836 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9837 
9838 		if (dm_new_crtc_state->stream)
9839 			amdgpu_dm_commit_planes(state, dc_state, dev,
9840 						dm, crtc, wait_for_vblank);
9841 	}
9842 
9843 	/* Update audio instances for each connector. */
9844 	amdgpu_dm_commit_audio(dev, state);
9845 
9846 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9847 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9848 	/* restore the backlight level */
9849 	for (i = 0; i < dm->num_of_edps; i++) {
9850 		if (dm->backlight_dev[i] &&
9851 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9852 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9853 	}
9854 #endif
9855 	/*
9856 	 * send vblank event on all events not handled in flip and
9857 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9858 	 */
9859 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9860 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9861 
9862 		if (new_crtc_state->event)
9863 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9864 
9865 		new_crtc_state->event = NULL;
9866 	}
9867 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9868 
9869 	/* Signal HW programming completion */
9870 	drm_atomic_helper_commit_hw_done(state);
9871 
9872 	if (wait_for_vblank)
9873 		drm_atomic_helper_wait_for_flip_done(dev, state);
9874 
9875 	drm_atomic_helper_cleanup_planes(dev, state);
9876 
9877 	/* return the stolen vga memory back to VRAM */
9878 	if (!adev->mman.keep_stolen_vga_memory)
9879 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9880 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9881 
9882 	/*
9883 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9884 	 * so we can put the GPU into runtime suspend if we're not driving any
9885 	 * displays anymore
9886 	 */
9887 	for (i = 0; i < crtc_disable_count; i++)
9888 		pm_runtime_put_autosuspend(dev->dev);
9889 	pm_runtime_mark_last_busy(dev->dev);
9890 
9891 	if (dc_state_temp)
9892 		dc_release_state(dc_state_temp);
9893 }
9894 
9895 
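/*
 * Build a minimal atomic state referencing the connector, its CRTC and the
 * CRTC's primary plane, mark the mode as changed to force a modeset, and
 * commit it to restore the previous display setting.
 */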
9896 static int dm_force_atomic_commit(struct drm_connector *connector)
9897 {
9898 	int ret = 0;
9899 	struct drm_device *ddev = connector->dev;
9900 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9901 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9902 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9903 	struct drm_connector_state *conn_state;
9904 	struct drm_crtc_state *crtc_state;
9905 	struct drm_plane_state *plane_state;
9906 
9907 	if (!state)
9908 		return -ENOMEM;
9909 
9910 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9911 
9912 	/* Construct an atomic state to restore previous display setting */
9913 
9914 	/*
9915 	 * Attach connectors to drm_atomic_state
9916 	 */
9917 	conn_state = drm_atomic_get_connector_state(state, connector);
9918 
9919 	ret = PTR_ERR_OR_ZERO(conn_state);
9920 	if (ret)
9921 		goto out;
9922 
9923 	/* Attach crtc to drm_atomic_state*/
9924 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9925 
9926 	ret = PTR_ERR_OR_ZERO(crtc_state);
9927 	if (ret)
9928 		goto out;
9929 
9930 	/* force a restore */
9931 	crtc_state->mode_changed = true;
9932 
9933 	/* Attach plane to drm_atomic_state */
9934 	plane_state = drm_atomic_get_plane_state(state, plane);
9935 
9936 	ret = PTR_ERR_OR_ZERO(plane_state);
9937 	if (ret)
9938 		goto out;
9939 
9940 	/* Call commit internally with the state we just constructed */
9941 	ret = drm_atomic_commit(state);
9942 
9943 out:
9944 	drm_atomic_state_put(state);
9945 	if (ret)
9946 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9947 
9948 	return ret;
9949 }
9950 
9951 /*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
9955  */
9956 void dm_restore_drm_connector_state(struct drm_device *dev,
9957 				    struct drm_connector *connector)
9958 {
9959 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9960 	struct amdgpu_crtc *disconnected_acrtc;
9961 	struct dm_crtc_state *acrtc_state;
9962 
9963 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9964 		return;
9965 
9966 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9967 	if (!disconnected_acrtc)
9968 		return;
9969 
9970 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9971 	if (!acrtc_state->stream)
9972 		return;
9973 
9974 	/*
9975 	 * If the previous sink is not released and different from the current,
9976 	 * we deduce we are in a state where we can not rely on usermode call
9977 	 * to turn on the display, so we do it here
9978 	 */
9979 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9980 		dm_force_atomic_commit(&aconnector->base);
9981 }
9982 
9983 /*
9984  * Grabs all modesetting locks to serialize against any blocking commits,
9985  * Waits for completion of all non blocking commits.
9986  */
9987 static int do_aquire_global_lock(struct drm_device *dev,
9988 				 struct drm_atomic_state *state)
9989 {
9990 	struct drm_crtc *crtc;
9991 	struct drm_crtc_commit *commit;
9992 	long ret;
9993 
9994 	/*
9995 	 * Adding all modeset locks to aquire_ctx will
9996 	 * ensure that when the framework release it the
9997 	 * extra locks we are locking here will get released to
9998 	 */
9999 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10000 	if (ret)
10001 		return ret;
10002 
10003 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10004 		spin_lock(&crtc->commit_lock);
10005 		commit = list_first_entry_or_null(&crtc->commit_list,
10006 				struct drm_crtc_commit, commit_entry);
10007 		if (commit)
10008 			drm_crtc_commit_get(commit);
10009 		spin_unlock(&crtc->commit_lock);
10010 
10011 		if (!commit)
10012 			continue;
10013 
10014 		/*
10015 		 * Make sure all pending HW programming completed and
10016 		 * page flips done
10017 		 */
10018 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10019 
10020 		if (ret > 0)
10021 			ret = wait_for_completion_interruptible_timeout(
10022 					&commit->flip_done, 10*HZ);
10023 
10024 		if (ret == 0)
10025 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10026 				  "timed out\n", crtc->base.id, crtc->name);
10027 
10028 		drm_crtc_commit_put(commit);
10029 	}
10030 
10031 	return ret < 0 ? ret : 0;
10032 }
10033 
10034 static void get_freesync_config_for_crtc(
10035 	struct dm_crtc_state *new_crtc_state,
10036 	struct dm_connector_state *new_con_state)
10037 {
10038 	struct mod_freesync_config config = {0};
10039 	struct amdgpu_dm_connector *aconnector =
10040 			to_amdgpu_dm_connector(new_con_state->base.connector);
10041 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10042 	int vrefresh = drm_mode_vrefresh(mode);
10043 	bool fs_vid_mode = false;
10044 
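	/*
	 * VRR is supported only when the connector reports FreeSync capability
	 * and the mode's nominal refresh rate falls within the panel's
	 * supported range.
	 */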
10045 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10046 					vrefresh >= aconnector->min_vfreq &&
10047 					vrefresh <= aconnector->max_vfreq;
10048 
10049 	if (new_crtc_state->vrr_supported) {
10050 		new_crtc_state->stream->ignore_msa_timing_param = true;
10051 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10052 
10053 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10054 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10055 		config.vsif_supported = true;
10056 		config.btr = true;
10057 
10058 		if (fs_vid_mode) {
10059 			config.state = VRR_STATE_ACTIVE_FIXED;
10060 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10061 			goto out;
10062 		} else if (new_crtc_state->base.vrr_enabled) {
10063 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10064 		} else {
10065 			config.state = VRR_STATE_INACTIVE;
10066 		}
10067 	}
10068 out:
10069 	new_crtc_state->freesync_config = config;
10070 }
10071 
10072 static void reset_freesync_config_for_crtc(
10073 	struct dm_crtc_state *new_crtc_state)
10074 {
10075 	new_crtc_state->vrr_supported = false;
10076 
10077 	memset(&new_crtc_state->vrr_infopacket, 0,
10078 	       sizeof(new_crtc_state->vrr_infopacket));
10079 }
10080 
10081 static bool
10082 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10083 				 struct drm_crtc_state *new_crtc_state)
10084 {
10085 	struct drm_display_mode old_mode, new_mode;
10086 
10087 	if (!old_crtc_state || !new_crtc_state)
10088 		return false;
10089 
10090 	old_mode = old_crtc_state->mode;
10091 	new_mode = new_crtc_state->mode;
10092 
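	/*
	 * The timings are considered compatible when only the vertical
	 * blanking changed: vtotal and the vsync position may differ, but the
	 * vsync width and all horizontal parameters must match, i.e. only the
	 * vertical front porch moved.
	 */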
10093 	if (old_mode.clock       == new_mode.clock &&
10094 	    old_mode.hdisplay    == new_mode.hdisplay &&
10095 	    old_mode.vdisplay    == new_mode.vdisplay &&
10096 	    old_mode.htotal      == new_mode.htotal &&
10097 	    old_mode.vtotal      != new_mode.vtotal &&
10098 	    old_mode.hsync_start == new_mode.hsync_start &&
10099 	    old_mode.vsync_start != new_mode.vsync_start &&
10100 	    old_mode.hsync_end   == new_mode.hsync_end &&
10101 	    old_mode.vsync_end   != new_mode.vsync_end &&
10102 	    old_mode.hskew       == new_mode.hskew &&
10103 	    old_mode.vscan       == new_mode.vscan &&
10104 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10105 	    (new_mode.vsync_end - new_mode.vsync_start))
10106 		return true;
10107 
10108 	return false;
10109 }
10110 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10112 	uint64_t num, den, res;
10113 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10114 
10115 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10116 
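	/*
	 * Fixed refresh rate in uHz: the mode clock is in kHz, so scale it by
	 * 1000 * 1000000 and divide by the total pixel count per frame
	 * (htotal * vtotal).
	 */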
10117 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10118 	den = (unsigned long long)new_crtc_state->mode.htotal *
10119 	      (unsigned long long)new_crtc_state->mode.vtotal;
10120 
10121 	res = div_u64(num, den);
10122 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10123 }
10124 
10125 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10126 				struct drm_atomic_state *state,
10127 				struct drm_crtc *crtc,
10128 				struct drm_crtc_state *old_crtc_state,
10129 				struct drm_crtc_state *new_crtc_state,
10130 				bool enable,
10131 				bool *lock_and_validation_needed)
10132 {
10133 	struct dm_atomic_state *dm_state = NULL;
10134 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10135 	struct dc_stream_state *new_stream;
10136 	int ret = 0;
10137 
10138 	/*
10139 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10140 	 * update changed items
10141 	 */
10142 	struct amdgpu_crtc *acrtc = NULL;
10143 	struct amdgpu_dm_connector *aconnector = NULL;
10144 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10145 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10146 
10147 	new_stream = NULL;
10148 
10149 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10150 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10151 	acrtc = to_amdgpu_crtc(crtc);
10152 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10153 
10154 	/* TODO This hack should go away */
10155 	if (aconnector && enable) {
		/* Make sure a fake sink is created in the plug-in scenario */
10157 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10158 							    &aconnector->base);
10159 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10160 							    &aconnector->base);
10161 
10162 		if (IS_ERR(drm_new_conn_state)) {
10163 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10164 			goto fail;
10165 		}
10166 
10167 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10168 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10169 
10170 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10171 			goto skip_modeset;
10172 
10173 		new_stream = create_validate_stream_for_sink(aconnector,
10174 							     &new_crtc_state->mode,
10175 							     dm_new_conn_state,
10176 							     dm_old_crtc_state->stream);
10177 
10178 		/*
10179 		 * we can have no stream on ACTION_SET if a display
10180 		 * was disconnected during S3, in this case it is not an
10181 		 * error, the OS will be updated after detection, and
10182 		 * will do the right thing on next atomic commit
10183 		 */
10184 
10185 		if (!new_stream) {
10186 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10187 					__func__, acrtc->base.base.id);
10188 			ret = -ENOMEM;
10189 			goto fail;
10190 		}
10191 
10192 		/*
10193 		 * TODO: Check VSDB bits to decide whether this should
10194 		 * be enabled or not.
10195 		 */
10196 		new_stream->triggered_crtc_reset.enabled =
10197 			dm->force_timing_sync;
10198 
10199 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10200 
10201 		ret = fill_hdr_info_packet(drm_new_conn_state,
10202 					   &new_stream->hdr_static_metadata);
10203 		if (ret)
10204 			goto fail;
10205 
10206 		/*
10207 		 * If we already removed the old stream from the context
10208 		 * (and set the new stream to NULL) then we can't reuse
10209 		 * the old stream even if the stream and scaling are unchanged.
10210 		 * We'll hit the BUG_ON and black screen.
10211 		 *
10212 		 * TODO: Refactor this function to allow this check to work
10213 		 * in all conditions.
10214 		 */
10215 		if (amdgpu_freesync_vid_mode &&
10216 		    dm_new_crtc_state->stream &&
10217 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10218 			goto skip_modeset;
10219 
10220 		if (dm_new_crtc_state->stream &&
10221 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10222 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10223 			new_crtc_state->mode_changed = false;
10224 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10225 					 new_crtc_state->mode_changed);
10226 		}
10227 	}
10228 
10229 	/* mode_changed flag may get updated above, need to check again */
10230 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10231 		goto skip_modeset;
10232 
10233 	DRM_DEBUG_ATOMIC(
10234 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10235 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10236 		"connectors_changed:%d\n",
10237 		acrtc->crtc_id,
10238 		new_crtc_state->enable,
10239 		new_crtc_state->active,
10240 		new_crtc_state->planes_changed,
10241 		new_crtc_state->mode_changed,
10242 		new_crtc_state->active_changed,
10243 		new_crtc_state->connectors_changed);
10244 
10245 	/* Remove stream for any changed/disabled CRTC */
10246 	if (!enable) {
10247 
10248 		if (!dm_old_crtc_state->stream)
10249 			goto skip_modeset;
10250 
10251 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10252 		    is_timing_unchanged_for_freesync(new_crtc_state,
10253 						     old_crtc_state)) {
10254 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
10259 
10260 			set_freesync_fixed_config(dm_new_crtc_state);
10261 
10262 			goto skip_modeset;
10263 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10264 			   is_freesync_video_mode(&new_crtc_state->mode,
10265 						  aconnector)) {
10266 			struct drm_display_mode *high_mode;
10267 
10268 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10269 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10270 				set_freesync_fixed_config(dm_new_crtc_state);
10271 			}
10272 		}
10273 
10274 		ret = dm_atomic_get_state(state, &dm_state);
10275 		if (ret)
10276 			goto fail;
10277 
10278 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10279 				crtc->base.id);
10280 
10281 		/* i.e. reset mode */
10282 		if (dc_remove_stream_from_ctx(
10283 				dm->dc,
10284 				dm_state->context,
10285 				dm_old_crtc_state->stream) != DC_OK) {
10286 			ret = -EINVAL;
10287 			goto fail;
10288 		}
10289 
10290 		dc_stream_release(dm_old_crtc_state->stream);
10291 		dm_new_crtc_state->stream = NULL;
10292 
10293 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10294 
10295 		*lock_and_validation_needed = true;
10296 
10297 	} else {/* Add stream for any updated/enabled CRTC */
10298 		/*
10299 		 * Quick fix to prevent NULL pointer on new_stream when
10300 		 * added MST connectors not found in existing crtc_state in the chained mode
10301 		 * TODO: need to dig out the root cause of that
10302 		 */
10303 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10304 			goto skip_modeset;
10305 
10306 		if (modereset_required(new_crtc_state))
10307 			goto skip_modeset;
10308 
10309 		if (modeset_required(new_crtc_state, new_stream,
10310 				     dm_old_crtc_state->stream)) {
10311 
10312 			WARN_ON(dm_new_crtc_state->stream);
10313 
10314 			ret = dm_atomic_get_state(state, &dm_state);
10315 			if (ret)
10316 				goto fail;
10317 
10318 			dm_new_crtc_state->stream = new_stream;
10319 
10320 			dc_stream_retain(new_stream);
10321 
10322 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10323 					 crtc->base.id);
10324 
10325 			if (dc_add_stream_to_ctx(
10326 					dm->dc,
10327 					dm_state->context,
10328 					dm_new_crtc_state->stream) != DC_OK) {
10329 				ret = -EINVAL;
10330 				goto fail;
10331 			}
10332 
10333 			*lock_and_validation_needed = true;
10334 		}
10335 	}
10336 
10337 skip_modeset:
10338 	/* Release extra reference */
10339 	if (new_stream)
		dc_stream_release(new_stream);
10341 
10342 	/*
10343 	 * We want to do dc stream updates that do not require a
10344 	 * full modeset below.
10345 	 */
10346 	if (!(enable && aconnector && new_crtc_state->active))
10347 		return 0;
10348 	/*
10349 	 * Given above conditions, the dc state cannot be NULL because:
10350 	 * 1. We're in the process of enabling CRTCs (just been added
10351 	 *    to the dc context, or already is on the context)
10352 	 * 2. Has a valid connector attached, and
10353 	 * 3. Is currently active and enabled.
10354 	 * => The dc stream state currently exists.
10355 	 */
10356 	BUG_ON(dm_new_crtc_state->stream == NULL);
10357 
10358 	/* Scaling or underscan settings */
10359 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10360 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10361 		update_stream_scaling_settings(
10362 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10363 
10364 	/* ABM settings */
10365 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10366 
10367 	/*
10368 	 * Color management settings. We also update color properties
10369 	 * when a modeset is needed, to ensure it gets reprogrammed.
10370 	 */
10371 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10372 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10373 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10374 		if (ret)
10375 			goto fail;
10376 	}
10377 
10378 	/* Update Freesync settings. */
10379 	get_freesync_config_for_crtc(dm_new_crtc_state,
10380 				     dm_new_conn_state);
10381 
10382 	return ret;
10383 
10384 fail:
10385 	if (new_stream)
10386 		dc_stream_release(new_stream);
10387 	return ret;
10388 }
10389 
10390 static bool should_reset_plane(struct drm_atomic_state *state,
10391 			       struct drm_plane *plane,
10392 			       struct drm_plane_state *old_plane_state,
10393 			       struct drm_plane_state *new_plane_state)
10394 {
10395 	struct drm_plane *other;
10396 	struct drm_plane_state *old_other_state, *new_other_state;
10397 	struct drm_crtc_state *new_crtc_state;
10398 	int i;
10399 
10400 	/*
10401 	 * TODO: Remove this hack once the checks below are sufficient
10402 	 * enough to determine when we need to reset all the planes on
10403 	 * the stream.
10404 	 */
10405 	if (state->allow_modeset)
10406 		return true;
10407 
10408 	/* Exit early if we know that we're adding or removing the plane. */
10409 	if (old_plane_state->crtc != new_plane_state->crtc)
10410 		return true;
10411 
10412 	/* old crtc == new_crtc == NULL, plane not in context. */
10413 	if (!new_plane_state->crtc)
10414 		return false;
10415 
10416 	new_crtc_state =
10417 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10418 
10419 	if (!new_crtc_state)
10420 		return true;
10421 
10422 	/* CRTC Degamma changes currently require us to recreate planes. */
10423 	if (new_crtc_state->color_mgmt_changed)
10424 		return true;
10425 
10426 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10427 		return true;
10428 
10429 	/*
10430 	 * If there are any new primary or overlay planes being added or
10431 	 * removed then the z-order can potentially change. To ensure
10432 	 * correct z-order and pipe acquisition the current DC architecture
10433 	 * requires us to remove and recreate all existing planes.
10434 	 *
10435 	 * TODO: Come up with a more elegant solution for this.
10436 	 */
10437 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10438 		struct amdgpu_framebuffer *old_afb, *new_afb;
10439 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10440 			continue;
10441 
10442 		if (old_other_state->crtc != new_plane_state->crtc &&
10443 		    new_other_state->crtc != new_plane_state->crtc)
10444 			continue;
10445 
10446 		if (old_other_state->crtc != new_other_state->crtc)
10447 			return true;
10448 
10449 		/* Src/dst size and scaling updates. */
10450 		if (old_other_state->src_w != new_other_state->src_w ||
10451 		    old_other_state->src_h != new_other_state->src_h ||
10452 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10453 		    old_other_state->crtc_h != new_other_state->crtc_h)
10454 			return true;
10455 
10456 		/* Rotation / mirroring updates. */
10457 		if (old_other_state->rotation != new_other_state->rotation)
10458 			return true;
10459 
10460 		/* Blending updates. */
10461 		if (old_other_state->pixel_blend_mode !=
10462 		    new_other_state->pixel_blend_mode)
10463 			return true;
10464 
10465 		/* Alpha updates. */
10466 		if (old_other_state->alpha != new_other_state->alpha)
10467 			return true;
10468 
10469 		/* Colorspace changes. */
10470 		if (old_other_state->color_range != new_other_state->color_range ||
10471 		    old_other_state->color_encoding != new_other_state->color_encoding)
10472 			return true;
10473 
10474 		/* Framebuffer checks fall at the end. */
10475 		if (!old_other_state->fb || !new_other_state->fb)
10476 			continue;
10477 
10478 		/* Pixel format changes can require bandwidth updates. */
10479 		if (old_other_state->fb->format != new_other_state->fb->format)
10480 			return true;
10481 
10482 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10483 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10484 
10485 		/* Tiling and DCC changes also require bandwidth updates. */
10486 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10487 		    old_afb->base.modifier != new_afb->base.modifier)
10488 			return true;
10489 	}
10490 
10491 	return false;
10492 }
10493 
10494 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10495 			      struct drm_plane_state *new_plane_state,
10496 			      struct drm_framebuffer *fb)
10497 {
10498 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10499 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10500 	unsigned int pitch;
10501 	bool linear;
10502 
10503 	if (fb->width > new_acrtc->max_cursor_width ||
10504 	    fb->height > new_acrtc->max_cursor_height) {
10505 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10506 				 new_plane_state->fb->width,
10507 				 new_plane_state->fb->height);
10508 		return -EINVAL;
10509 	}
10510 	if (new_plane_state->src_w != fb->width << 16 ||
10511 	    new_plane_state->src_h != fb->height << 16) {
10512 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10513 		return -EINVAL;
10514 	}
10515 
10516 	/* Pitch in pixels */
10517 	pitch = fb->pitches[0] / fb->format->cpp[0];
10518 
10519 	if (fb->width != pitch) {
10520 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10521 				 fb->width, pitch);
10522 		return -EINVAL;
10523 	}
10524 
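	/* The cursor plane only supports surface pitches of 64, 128 or 256 pixels. */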
10525 	switch (pitch) {
10526 	case 64:
10527 	case 128:
10528 	case 256:
10529 		/* FB pitch is supported by cursor plane */
10530 		break;
10531 	default:
10532 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10533 		return -EINVAL;
10534 	}
10535 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10538 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10539 		if (adev->family < AMDGPU_FAMILY_AI) {
10540 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10541 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10542 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10543 		} else {
10544 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10545 		}
10546 		if (!linear) {
10547 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10548 			return -EINVAL;
10549 		}
10550 	}
10551 
10552 	return 0;
10553 }
10554 
10555 static int dm_update_plane_state(struct dc *dc,
10556 				 struct drm_atomic_state *state,
10557 				 struct drm_plane *plane,
10558 				 struct drm_plane_state *old_plane_state,
10559 				 struct drm_plane_state *new_plane_state,
10560 				 bool enable,
10561 				 bool *lock_and_validation_needed)
10562 {
10563 
10564 	struct dm_atomic_state *dm_state = NULL;
10565 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10566 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10567 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10568 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10569 	struct amdgpu_crtc *new_acrtc;
10570 	bool needs_reset;
10571 	int ret = 0;
10572 
10573 
10574 	new_plane_crtc = new_plane_state->crtc;
10575 	old_plane_crtc = old_plane_state->crtc;
10576 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10577 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10578 
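	/*
	 * Cursor planes never get a dc_plane_state of their own; only validate
	 * their position and framebuffer constraints here.
	 */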
10579 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10580 		if (!enable || !new_plane_crtc ||
10581 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10582 			return 0;
10583 
10584 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10585 
10586 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10587 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10588 			return -EINVAL;
10589 		}
10590 
10591 		if (new_plane_state->fb) {
10592 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10593 						 new_plane_state->fb);
10594 			if (ret)
10595 				return ret;
10596 		}
10597 
10598 		return 0;
10599 	}
10600 
10601 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10602 					 new_plane_state);
10603 
10604 	/* Remove any changed/removed planes */
10605 	if (!enable) {
10606 		if (!needs_reset)
10607 			return 0;
10608 
10609 		if (!old_plane_crtc)
10610 			return 0;
10611 
10612 		old_crtc_state = drm_atomic_get_old_crtc_state(
10613 				state, old_plane_crtc);
10614 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10615 
10616 		if (!dm_old_crtc_state->stream)
10617 			return 0;
10618 
10619 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10620 				plane->base.id, old_plane_crtc->base.id);
10621 
10622 		ret = dm_atomic_get_state(state, &dm_state);
10623 		if (ret)
10624 			return ret;
10625 
10626 		if (!dc_remove_plane_from_context(
10627 				dc,
10628 				dm_old_crtc_state->stream,
10629 				dm_old_plane_state->dc_state,
10630 				dm_state->context)) {
10631 
10632 			return -EINVAL;
10633 		}
10634 
10635 
10636 		dc_plane_state_release(dm_old_plane_state->dc_state);
10637 		dm_new_plane_state->dc_state = NULL;
10638 
10639 		*lock_and_validation_needed = true;
10640 
10641 	} else { /* Add new planes */
10642 		struct dc_plane_state *dc_new_plane_state;
10643 
10644 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10645 			return 0;
10646 
10647 		if (!new_plane_crtc)
10648 			return 0;
10649 
10650 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10651 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10652 
10653 		if (!dm_new_crtc_state->stream)
10654 			return 0;
10655 
10656 		if (!needs_reset)
10657 			return 0;
10658 
10659 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10660 		if (ret)
10661 			return ret;
10662 
10663 		WARN_ON(dm_new_plane_state->dc_state);
10664 
10665 		dc_new_plane_state = dc_create_plane_state(dc);
10666 		if (!dc_new_plane_state)
10667 			return -ENOMEM;
10668 
10669 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10670 				 plane->base.id, new_plane_crtc->base.id);
10671 
10672 		ret = fill_dc_plane_attributes(
10673 			drm_to_adev(new_plane_crtc->dev),
10674 			dc_new_plane_state,
10675 			new_plane_state,
10676 			new_crtc_state);
10677 		if (ret) {
10678 			dc_plane_state_release(dc_new_plane_state);
10679 			return ret;
10680 		}
10681 
10682 		ret = dm_atomic_get_state(state, &dm_state);
10683 		if (ret) {
10684 			dc_plane_state_release(dc_new_plane_state);
10685 			return ret;
10686 		}
10687 
10688 		/*
10689 		 * Any atomic check errors that occur after this will
10690 		 * not need a release. The plane state will be attached
10691 		 * to the stream, and therefore part of the atomic
10692 		 * state. It'll be released when the atomic state is
10693 		 * cleaned.
10694 		 */
10695 		if (!dc_add_plane_to_context(
10696 				dc,
10697 				dm_new_crtc_state->stream,
10698 				dc_new_plane_state,
10699 				dm_state->context)) {
10700 
10701 			dc_plane_state_release(dc_new_plane_state);
10702 			return -EINVAL;
10703 		}
10704 
10705 		dm_new_plane_state->dc_state = dc_new_plane_state;
10706 
10707 		/* Tell DC to do a full surface update every time there
10708 		 * is a plane change. Inefficient, but works for now.
10709 		 */
10710 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10711 
10712 		*lock_and_validation_needed = true;
10713 	}
10714 
10715 
10716 	return ret;
10717 }
10718 
10719 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10720 				struct drm_crtc *crtc,
10721 				struct drm_crtc_state *new_crtc_state)
10722 {
10723 	struct drm_plane *cursor = crtc->cursor, *underlying;
10724 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10725 	int i;
10726 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10727 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it is going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * scaling matches the underlying planes'.
	 */
10732 
10733 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10734 	if (!new_cursor_state || !new_cursor_state->fb) {
10735 		return 0;
10736 	}
10737 
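	/*
	 * Express the cursor and underlying plane scaling as ratios of CRTC
	 * size to source size, in 1/1000 units, so they can be compared with
	 * integer math.
	 */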
10738 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10739 			 (new_cursor_state->src_w >> 16);
10740 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10741 			 (new_cursor_state->src_h >> 16);
10742 
10743 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10744 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10745 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10746 			continue;
10747 
10748 		/* Ignore disabled planes */
10749 		if (!new_underlying_state->fb)
10750 			continue;
10751 
10752 		underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10753 				     (new_underlying_state->src_w >> 16);
10754 		underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10755 				     (new_underlying_state->src_h >> 16);
10756 
10757 		if (cursor_scale_w != underlying_scale_w ||
10758 		    cursor_scale_h != underlying_scale_h) {
10759 			drm_dbg_atomic(crtc->dev,
10760 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10761 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10762 			return -EINVAL;
10763 		}
10764 
10765 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10766 		if (new_underlying_state->crtc_x <= 0 &&
10767 		    new_underlying_state->crtc_y <= 0 &&
10768 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10769 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10770 			break;
10771 	}
10772 
10773 	return 0;
10774 }
10775 
10776 #if defined(CONFIG_DRM_AMD_DC_DCN)
10777 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10778 {
10779 	struct drm_connector *connector;
10780 	struct drm_connector_state *conn_state;
10781 	struct amdgpu_dm_connector *aconnector = NULL;
10782 	int i;
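
	/*
	 * Find the MST connector driven by this CRTC so that every CRTC
	 * sharing its MST topology can be added for DSC recomputation.
	 */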
10783 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10784 		if (conn_state->crtc != crtc)
10785 			continue;
10786 
10787 		aconnector = to_amdgpu_dm_connector(connector);
10788 		if (!aconnector->port || !aconnector->mst_port)
10789 			aconnector = NULL;
10790 		else
10791 			break;
10792 	}
10793 
10794 	if (!aconnector)
10795 		return 0;
10796 
10797 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10798 }
10799 #endif
10800 
10801 /**
10802  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10803  * @dev: The DRM device
10804  * @state: The atomic state to commit
10805  *
10806  * Validate that the given atomic state is programmable by DC into hardware.
10807  * This involves constructing a &struct dc_state reflecting the new hardware
10808  * state we wish to commit, then querying DC to see if it is programmable. It's
10809  * important not to modify the existing DC state. Otherwise, atomic_check
10810  * may unexpectedly commit hardware changes.
10811  *
10812  * When validating the DC state, it's important that the right locks are
10813  * acquired. For full updates case which removes/adds/updates streams on one
10814  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10815  * that any such full update commit will wait for completion of any outstanding
10816  * flip using DRMs synchronization events.
10817  *
10818  * Note that DM adds the affected connectors for all CRTCs in state, when that
10819  * might not seem necessary. This is because DC stream creation requires the
10820  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10821  * be possible but non-trivial - a possible TODO item.
10822  *
10823  * Return: -Error code if validation failed.
10824  */
10825 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10826 				  struct drm_atomic_state *state)
10827 {
10828 	struct amdgpu_device *adev = drm_to_adev(dev);
10829 	struct dm_atomic_state *dm_state = NULL;
10830 	struct dc *dc = adev->dm.dc;
10831 	struct drm_connector *connector;
10832 	struct drm_connector_state *old_con_state, *new_con_state;
10833 	struct drm_crtc *crtc;
10834 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10835 	struct drm_plane *plane;
10836 	struct drm_plane_state *old_plane_state, *new_plane_state;
10837 	enum dc_status status;
10838 	int ret, i;
10839 	bool lock_and_validation_needed = false;
10840 	struct dm_crtc_state *dm_old_crtc_state;
10841 #if defined(CONFIG_DRM_AMD_DC_DCN)
10842 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10843 	struct drm_dp_mst_topology_state *mst_state;
10844 	struct drm_dp_mst_topology_mgr *mgr;
10845 #endif
10846 
10847 	trace_amdgpu_dm_atomic_check_begin(state);
10848 
10849 	ret = drm_atomic_helper_check_modeset(dev, state);
10850 	if (ret) {
10851 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10852 		goto fail;
10853 	}
10854 
10855 	/* Check connector changes */
10856 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10857 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10858 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10859 
10860 		/* Skip connectors that are disabled or part of modeset already. */
10861 		if (!old_con_state->crtc && !new_con_state->crtc)
10862 			continue;
10863 
10864 		if (!new_con_state->crtc)
10865 			continue;
10866 
10867 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10868 		if (IS_ERR(new_crtc_state)) {
10869 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10870 			ret = PTR_ERR(new_crtc_state);
10871 			goto fail;
10872 		}
10873 
10874 		if (dm_old_con_state->abm_level !=
10875 		    dm_new_con_state->abm_level)
10876 			new_crtc_state->connectors_changed = true;
10877 	}
10878 
10879 #if defined(CONFIG_DRM_AMD_DC_DCN)
10880 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10881 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10882 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10883 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10884 				if (ret) {
10885 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10886 					goto fail;
10887 				}
10888 			}
10889 		}
10890 	}
10891 #endif
10892 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10893 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10894 
10895 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10896 		    !new_crtc_state->color_mgmt_changed &&
10897 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10898 			dm_old_crtc_state->dsc_force_changed == false)
10899 			continue;
10900 
10901 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10902 		if (ret) {
10903 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10904 			goto fail;
10905 		}
10906 
10907 		if (!new_crtc_state->enable)
10908 			continue;
10909 
10910 		ret = drm_atomic_add_affected_connectors(state, crtc);
10911 		if (ret) {
10912 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10913 			goto fail;
10914 		}
10915 
10916 		ret = drm_atomic_add_affected_planes(state, crtc);
10917 		if (ret) {
10918 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10919 			goto fail;
10920 		}
10921 
10922 		if (dm_old_crtc_state->dsc_force_changed)
10923 			new_crtc_state->mode_changed = true;
10924 	}
10925 
10926 	/*
10927 	 * Add all primary and overlay planes on the CRTC to the state
10928 	 * whenever a plane is enabled to maintain correct z-ordering
10929 	 * and to enable fast surface updates.
10930 	 */
10931 	drm_for_each_crtc(crtc, dev) {
10932 		bool modified = false;
10933 
10934 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10935 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10936 				continue;
10937 
10938 			if (new_plane_state->crtc == crtc ||
10939 			    old_plane_state->crtc == crtc) {
10940 				modified = true;
10941 				break;
10942 			}
10943 		}
10944 
10945 		if (!modified)
10946 			continue;
10947 
10948 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10949 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10950 				continue;
10951 
10952 			new_plane_state =
10953 				drm_atomic_get_plane_state(state, plane);
10954 
10955 			if (IS_ERR(new_plane_state)) {
10956 				ret = PTR_ERR(new_plane_state);
10957 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
10958 				goto fail;
10959 			}
10960 		}
10961 	}
10962 
	/* Remove existing planes if they are modified */
10964 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10965 		ret = dm_update_plane_state(dc, state, plane,
10966 					    old_plane_state,
10967 					    new_plane_state,
10968 					    false,
10969 					    &lock_and_validation_needed);
10970 		if (ret) {
10971 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10972 			goto fail;
10973 		}
10974 	}
10975 
10976 	/* Disable all crtcs which require disable */
10977 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10978 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10979 					   old_crtc_state,
10980 					   new_crtc_state,
10981 					   false,
10982 					   &lock_and_validation_needed);
10983 		if (ret) {
10984 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
10985 			goto fail;
10986 		}
10987 	}
10988 
10989 	/* Enable all crtcs which require enable */
10990 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10991 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10992 					   old_crtc_state,
10993 					   new_crtc_state,
10994 					   true,
10995 					   &lock_and_validation_needed);
10996 		if (ret) {
10997 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
10998 			goto fail;
10999 		}
11000 	}
11001 
11002 	/* Add new/modified planes */
11003 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11004 		ret = dm_update_plane_state(dc, state, plane,
11005 					    old_plane_state,
11006 					    new_plane_state,
11007 					    true,
11008 					    &lock_and_validation_needed);
11009 		if (ret) {
11010 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11011 			goto fail;
11012 		}
11013 	}
11014 
11015 	/* Run this here since we want to validate the streams we created */
11016 	ret = drm_atomic_helper_check_planes(dev, state);
11017 	if (ret) {
11018 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11019 		goto fail;
11020 	}
11021 
11022 	/* Check cursor planes scaling */
11023 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11024 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11025 		if (ret) {
11026 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11027 			goto fail;
11028 		}
11029 	}
11030 
11031 	if (state->legacy_cursor_update) {
11032 		/*
11033 		 * This is a fast cursor update coming from the plane update
11034 		 * helper, check if it can be done asynchronously for better
11035 		 * performance.
11036 		 */
11037 		state->async_update =
11038 			!drm_atomic_helper_async_check(dev, state);
11039 
11040 		/*
11041 		 * Skip the remaining global validation if this is an async
11042 		 * update. Cursor updates can be done without affecting
11043 		 * state or bandwidth calcs and this avoids the performance
11044 		 * penalty of locking the private state object and
11045 		 * allocating a new dc_state.
11046 		 */
11047 		if (state->async_update)
11048 			return 0;
11049 	}
11050 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
11056 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11057 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11058 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11059 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11060 
11061 		/* Skip any modesets/resets */
11062 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11063 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11064 			continue;
11065 
		/* Skip anything that is not a scaling or underscan change */
11067 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11068 			continue;
11069 
11070 		lock_and_validation_needed = true;
11071 	}
11072 
11073 #if defined(CONFIG_DRM_AMD_DC_DCN)
11074 	/* set the slot info for each mst_state based on the link encoding format */
11075 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11076 		struct amdgpu_dm_connector *aconnector;
11077 		struct drm_connector *connector;
11078 		struct drm_connector_list_iter iter;
11079 		u8 link_coding_cap;
11080 
		if (!mgr->mst_state)
11082 			continue;
11083 
11084 		drm_connector_list_iter_begin(dev, &iter);
11085 		drm_for_each_connector_iter(connector, &iter) {
11086 			int id = connector->index;
11087 
11088 			if (id == mst_state->mgr->conn_base_id) {
11089 				aconnector = to_amdgpu_dm_connector(connector);
11090 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11091 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11092 
11093 				break;
11094 			}
11095 		}
11096 		drm_connector_list_iter_end(&iter);
11097 
11098 	}
11099 #endif
11100 	/**
11101 	 * Streams and planes are reset when there are changes that affect
11102 	 * bandwidth. Anything that affects bandwidth needs to go through
11103 	 * DC global validation to ensure that the configuration can be applied
11104 	 * to hardware.
11105 	 *
11106 	 * We have to currently stall out here in atomic_check for outstanding
11107 	 * commits to finish in this case because our IRQ handlers reference
11108 	 * DRM state directly - we can end up disabling interrupts too early
11109 	 * if we don't.
11110 	 *
11111 	 * TODO: Remove this stall and drop DM state private objects.
11112 	 */
11113 	if (lock_and_validation_needed) {
11114 		ret = dm_atomic_get_state(state, &dm_state);
11115 		if (ret) {
11116 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11117 			goto fail;
11118 		}
11119 
11120 		ret = do_aquire_global_lock(dev, state);
11121 		if (ret) {
11122 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11123 			goto fail;
11124 		}
11125 
11126 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}
11131 
11132 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11133 		if (ret) {
11134 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11135 			goto fail;
11136 		}
11137 #endif
11138 
11139 		/*
11140 		 * Perform validation of MST topology in the state:
11141 		 * We need to perform MST atomic check before calling
11142 		 * dc_validate_global_state(), or there is a chance
11143 		 * to get stuck in an infinite loop and hang eventually.
11144 		 */
11145 		ret = drm_dp_mst_atomic_check(state);
11146 		if (ret) {
11147 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11148 			goto fail;
11149 		}
11150 		status = dc_validate_global_state(dc, dm_state->context, true);
11151 		if (status != DC_OK) {
11152 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11153 				       dc_status_to_str(status), status);
11154 			ret = -EINVAL;
11155 			goto fail;
11156 		}
11157 	} else {
11158 		/*
11159 		 * The commit is a fast update. Fast updates shouldn't change
11160 		 * the DC context, affect global validation, and can have their
11161 		 * commit work done in parallel with other commits not touching
11162 		 * the same resource. If we have a new DC context as part of
11163 		 * the DM atomic state from validation we need to free it and
11164 		 * retain the existing one instead.
11165 		 *
11166 		 * Furthermore, since the DM atomic state only contains the DC
11167 		 * context and can safely be annulled, we can free the state
11168 		 * and clear the associated private object now to free
11169 		 * some memory and avoid a possible use-after-free later.
11170 		 */
11171 
11172 		for (i = 0; i < state->num_private_objs; i++) {
11173 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11174 
11175 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11176 				int j = state->num_private_objs-1;
11177 
11178 				dm_atomic_destroy_state(obj,
11179 						state->private_objs[i].state);
11180 
11181 				/* If i is not at the end of the array then the
11182 				 * last element needs to be moved to where i was
11183 				 * before the array can safely be truncated.
11184 				 */
11185 				if (i != j)
11186 					state->private_objs[i] =
11187 						state->private_objs[j];
11188 
11189 				state->private_objs[j].ptr = NULL;
11190 				state->private_objs[j].state = NULL;
11191 				state->private_objs[j].old_state = NULL;
11192 				state->private_objs[j].new_state = NULL;
11193 
11194 				state->num_private_objs = j;
11195 				break;
11196 			}
11197 		}
11198 	}
11199 
11200 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11202 		struct dm_crtc_state *dm_new_crtc_state =
11203 			to_dm_crtc_state(new_crtc_state);
11204 
11205 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11206 							 UPDATE_TYPE_FULL :
11207 							 UPDATE_TYPE_FAST;
11208 	}
11209 
11210 	/* Must be success */
11211 	WARN_ON(ret);
11212 
11213 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11214 
11215 	return ret;
11216 
11217 fail:
11218 	if (ret == -EDEADLK)
11219 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11220 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11221 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11222 	else
11223 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11224 
11225 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11226 
11227 	return ret;
11228 }
11229 
11230 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11231 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11232 {
11233 	uint8_t dpcd_data;
11234 	bool capable = false;
11235 
11236 	if (amdgpu_dm_connector->dc_link &&
11237 		dm_helpers_dp_read_dpcd(
11238 				NULL,
11239 				amdgpu_dm_connector->dc_link,
11240 				DP_DOWN_STREAM_PORT_COUNT,
11241 				&dpcd_data,
11242 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11244 	}
11245 
11246 	return capable;
11247 }
11248 
11249 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11250 		unsigned int offset,
11251 		unsigned int total_length,
11252 		uint8_t *data,
11253 		unsigned int length,
11254 		struct amdgpu_hdmi_vsdb_info *vsdb)
11255 {
11256 	bool res;
11257 	union dmub_rb_cmd cmd;
11258 	struct dmub_cmd_send_edid_cea *input;
11259 	struct dmub_cmd_edid_cea_output *output;
11260 
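	/* Each DMUB transfer carries at most DMUB_EDID_CEA_DATA_CHUNK_BYTES of EDID data. */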
11261 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11262 		return false;
11263 
11264 	memset(&cmd, 0, sizeof(cmd));
11265 
11266 	input = &cmd.edid_cea.data.input;
11267 
11268 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11269 	cmd.edid_cea.header.sub_type = 0;
11270 	cmd.edid_cea.header.payload_bytes =
11271 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11272 	input->offset = offset;
11273 	input->length = length;
11274 	input->total_length = total_length;
11275 	memcpy(input->payload, data, length);
11276 
11277 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11278 	if (!res) {
11279 		DRM_ERROR("EDID CEA parser failed\n");
11280 		return false;
11281 	}
11282 
11283 	output = &cmd.edid_cea.data.output;
11284 
11285 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11286 		if (!output->ack.success) {
11287 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11288 					output->ack.offset);
11289 		}
11290 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11291 		if (!output->amd_vsdb.vsdb_found)
11292 			return false;
11293 
11294 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11295 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11296 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11297 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11298 	} else {
11299 		DRM_WARN("Unknown EDID CEA parser results\n");
11300 		return false;
11301 	}
11302 
11303 	return true;
11304 }
11305 
11306 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11307 		uint8_t *edid_ext, int len,
11308 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11309 {
11310 	int i;
11311 
11312 	/* send extension block to DMCU for parsing */
11313 	for (i = 0; i < len; i += 8) {
11314 		bool res;
11315 		int offset;
11316 
		/* send 8 bytes at a time */
11318 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11319 			return false;
11320 
		if (i + 8 == len) {
			/* EDID block transfer completed, expect the result */
11323 			int version, min_rate, max_rate;
11324 
11325 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11326 			if (res) {
11327 				/* amd vsdb found */
11328 				vsdb_info->freesync_supported = 1;
11329 				vsdb_info->amd_vsdb_version = version;
11330 				vsdb_info->min_refresh_rate_hz = min_rate;
11331 				vsdb_info->max_refresh_rate_hz = max_rate;
11332 				return true;
11333 			}
11334 			/* not amd vsdb */
11335 			return false;
11336 		}
11337 
		/* check for ack */
11339 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11340 		if (!res)
11341 			return false;
11342 	}
11343 
11344 	return false;
11345 }
11346 
11347 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11348 		uint8_t *edid_ext, int len,
11349 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11350 {
11351 	int i;
11352 
	/* send extension block to DMUB for parsing */
11354 	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
11356 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11357 			return false;
11358 	}
11359 
11360 	return vsdb_info->freesync_supported;
11361 }
11362 
11363 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11364 		uint8_t *edid_ext, int len,
11365 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11366 {
11367 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11368 
11369 	if (adev->dm.dmub_srv)
11370 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11371 	else
11372 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11373 }
11374 
11375 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11376 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11377 {
11378 	uint8_t *edid_ext = NULL;
11379 	int i;
11380 	bool valid_vsdb_found = false;
11381 
11382 	/*----- drm_find_cea_extension() -----*/
11383 	/* No EDID or EDID extensions */
11384 	if (edid == NULL || edid->extensions == 0)
11385 		return -ENODEV;
11386 
11387 	/* Find CEA extension */
11388 	for (i = 0; i < edid->extensions; i++) {
11389 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11390 		if (edid_ext[0] == CEA_EXT)
11391 			break;
11392 	}
11393 
11394 	if (i == edid->extensions)
11395 		return -ENODEV;
11396 
11397 	/*----- cea_db_offsets() -----*/
11398 	if (edid_ext[0] != CEA_EXT)
11399 		return -ENODEV;
11400 
11401 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11402 
11403 	return valid_vsdb_found ? i : -ENODEV;
11404 }
11405 
11406 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11407 					struct edid *edid)
11408 {
11409 	int i = 0;
11410 	struct detailed_timing *timing;
11411 	struct detailed_non_pixel *data;
11412 	struct detailed_data_monitor_range *range;
11413 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11414 			to_amdgpu_dm_connector(connector);
11415 	struct dm_connector_state *dm_con_state = NULL;
11416 	struct dc_sink *sink;
11417 
11418 	struct drm_device *dev = connector->dev;
11419 	struct amdgpu_device *adev = drm_to_adev(dev);
11420 	bool freesync_capable = false;
11421 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11422 
11423 	if (!connector->state) {
11424 		DRM_ERROR("%s - Connector has no state", __func__);
11425 		goto update;
11426 	}
11427 
11428 	sink = amdgpu_dm_connector->dc_sink ?
11429 		amdgpu_dm_connector->dc_sink :
11430 		amdgpu_dm_connector->dc_em_sink;
11431 
11432 	if (!edid || !sink) {
11433 		dm_con_state = to_dm_connector_state(connector->state);
11434 
11435 		amdgpu_dm_connector->min_vfreq = 0;
11436 		amdgpu_dm_connector->max_vfreq = 0;
11437 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11438 		connector->display_info.monitor_range.min_vfreq = 0;
11439 		connector->display_info.monitor_range.max_vfreq = 0;
11440 		freesync_capable = false;
11441 
11442 		goto update;
11443 	}
11444 
11445 	dm_con_state = to_dm_connector_state(connector->state);
11446 
11447 	if (!adev->dm.freesync_module)
11448 		goto update;
11449 
11450 
11451 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11452 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11453 		bool edid_check_required = false;
11454 
11455 		if (edid) {
11456 			edid_check_required = is_dp_capable_without_timing_msa(
11457 						adev->dm.dc,
11458 						amdgpu_dm_connector);
11459 		}
11460 
		if (edid_check_required && (edid->version > 1 ||
		   (edid->version == 1 && edid->revision > 1))) {
11463 			for (i = 0; i < 4; i++) {
11464 
11465 				timing	= &edid->detailed_timings[i];
11466 				data	= &timing->data.other_data;
11467 				range	= &data->data.range;
11468 				/*
11469 				 * Check if monitor has continuous frequency mode
11470 				 */
11471 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11472 					continue;
11473 				/*
11474 				 * Check for flag range limits only. If flag == 1 then
11475 				 * no additional timing information provided.
11476 				 * Default GTF, GTF Secondary curve and CVT are not
11477 				 * supported
11478 				 */
11479 				if (range->flags != 1)
11480 					continue;
11481 
11482 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11483 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11484 				amdgpu_dm_connector->pixel_clock_mhz =
11485 					range->pixel_clock_mhz * 10;
11486 
11487 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11488 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11489 
11490 				break;
11491 			}
11492 
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {
				freesync_capable = true;
			}
11498 		}
11499 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11500 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11501 		if (i >= 0 && vsdb_info.freesync_supported) {
11502 			timing  = &edid->detailed_timings[i];
11503 			data    = &timing->data.other_data;
11504 
11505 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11506 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11507 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11508 				freesync_capable = true;
11509 
11510 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11511 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11512 		}
11513 	}
11514 
11515 update:
11516 	if (dm_con_state)
11517 		dm_con_state->freesync_capable = freesync_capable;
11518 
11519 	if (connector->vrr_capable_property)
11520 		drm_connector_set_vrr_capable_property(connector,
11521 						       freesync_capable);
11522 }
11523 
11524 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11525 {
11526 	struct amdgpu_device *adev = drm_to_adev(dev);
11527 	struct dc *dc = adev->dm.dc;
11528 	int i;
11529 
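	/*
	 * Propagate the current force_timing_sync setting to every committed
	 * stream and re-trigger cross-CRTC synchronization under the DC lock.
	 */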
11530 	mutex_lock(&adev->dm.dc_lock);
11531 	if (dc->current_state) {
11532 		for (i = 0; i < dc->current_state->stream_count; ++i)
11533 			dc->current_state->streams[i]
11534 				->triggered_crtc_reset.enabled =
11535 				adev->dm.force_timing_sync;
11536 
11537 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11538 		dc_trigger_sync(dc, dc->current_state);
11539 	}
11540 	mutex_unlock(&adev->dm.dc_lock);
11541 }
11542 
11543 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11544 		       uint32_t value, const char *func_name)
11545 {
11546 #ifdef DM_CHECK_ADDR_0
11547 	if (address == 0) {
11548 		DC_ERR("invalid register write. address = 0");
11549 		return;
11550 	}
11551 #endif
11552 	cgs_write_register(ctx->cgs_device, address, value);
11553 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11554 }
11555 
11556 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11557 			  const char *func_name)
11558 {
11559 	uint32_t value;
11560 #ifdef DM_CHECK_ADDR_0
11561 	if (address == 0) {
11562 		DC_ERR("invalid register read; address = 0\n");
11563 		return 0;
11564 	}
11565 #endif
11566 
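	/*
	 * Register reads are not serviced while a DMUB register-access gather
	 * is in progress, unless the sequence is being collected as burst
	 * writes.
	 */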
11567 	if (ctx->dmub_srv &&
11568 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11569 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11570 		ASSERT(false);
11571 		return 0;
11572 	}
11573 
11574 	value = cgs_read_register(ctx->cgs_device, address);
11575 
11576 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11577 
11578 	return value;
11579 }
11580 
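/*
 * Translate the outcome of a DMUB async-to-sync transfer into a return value
 * and an operation result for the caller. On success, AUX commands return the
 * reply length and SET_CONFIG commands return 0; every other status type
 * returns -1 and encodes the failure in *operation_result.
 */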
11581 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11582 	uint8_t status_type, uint32_t *operation_result)
11583 {
11584 	struct amdgpu_device *adev = ctx->driver_context;
11585 	int return_status = -1;
11586 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11587 
11588 	if (is_cmd_aux) {
11589 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11590 			return_status = p_notify->aux_reply.length;
11591 			*operation_result = p_notify->result;
11592 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11593 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11594 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11595 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11596 		} else {
11597 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11598 		}
11599 	} else {
11600 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11601 			return_status = 0;
11602 			*operation_result = p_notify->sc_status;
11603 		} else {
11604 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11605 		}
11606 	}
11607 
11608 	return return_status;
11609 }
11610 
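/*
 * Submit an AUX or SET_CONFIG command to DMUB and wait (up to 10 seconds) for
 * the reply notification. SET_CONFIG commands reported as already complete by
 * dc_process_dmub_set_config_async() return without waiting. For successful
 * AUX reads, the reply data from DMUB is copied back into the caller's payload
 * before the status is reported through amdgpu_dm_set_dmub_async_sync_status().
 */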
11611 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11612 	unsigned int link_index, void *cmd_payload, void *operation_result)
11613 {
11614 	struct amdgpu_device *adev = ctx->driver_context;
11615 	int ret = 0;
11616 
11617 	if (is_cmd_aux) {
11618 		dc_process_dmub_aux_transfer_async(ctx->dc,
11619 			link_index, (struct aux_payload *)cmd_payload);
11620 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11621 					(struct set_config_cmd_payload *)cmd_payload,
11622 					adev->dm.dmub_notify)) {
11623 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11624 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11625 					(uint32_t *)operation_result);
11626 	}
11627 
11628 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11629 	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
11631 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11632 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11633 				(uint32_t *)operation_result);
11634 	}
11635 
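	/*
	 * For AUX reads that were ACKed, copy the reply data returned by
	 * DMUB back into the caller's payload.
	 */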
11636 	if (is_cmd_aux) {
11637 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11638 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11639 
11640 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11641 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11642 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11643 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11644 				       adev->dm.dmub_notify->aux_reply.length);
11645 			}
11646 		}
11647 	}
11648 
11649 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11650 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11651 			(uint32_t *)operation_result);
11652 }
11653 
11654 /*
11655  * Check whether seamless boot is supported.
11656  *
11657  * So far we only support seamless boot on CHIP_VANGOGH.
11658  * If everything goes well, we may consider expanding
11659  * seamless boot to other ASICs.
11660  */
11661 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11662 {
11663 	switch (adev->asic_type) {
11664 	case CHIP_VANGOGH:
11665 		if (!adev->mman.keep_stolen_vga_memory)
11666 			return true;
11667 		break;
11668 	default:
11669 		break;
11670 	}
11671 
11672 	return false;
11673 }
11674