xref: /openbmc/linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 8b681bd7c301c423fbe97a6b23388a2180ff04ca)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85 
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 #endif
96 
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100 
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 
118 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
119 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120 
121 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123 
124 /* Number of bytes in PSP header for firmware. */
125 #define PSP_HEADER_BYTES 0x100
126 
127 /* Number of bytes in PSP footer for firmware. */
128 #define PSP_FOOTER_BYTES 0x100
129 
130 /**
131  * DOC: overview
132  *
133  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135  * requests into DC requests, and DC responses into DRM responses.
136  *
137  * The root control structure is &struct amdgpu_display_manager.
138  */
139 
140 /* basic init/fini API */
141 static int amdgpu_dm_init(struct amdgpu_device *adev);
142 static void amdgpu_dm_fini(struct amdgpu_device *adev);
143 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
144 
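/*
 * Map the dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed through the DP subconnector property.
 */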
145 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146 {
147 	switch (link->dpcd_caps.dongle_type) {
148 	case DISPLAY_DONGLE_NONE:
149 		return DRM_MODE_SUBCONNECTOR_Native;
150 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
151 		return DRM_MODE_SUBCONNECTOR_VGA;
152 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
153 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
154 		return DRM_MODE_SUBCONNECTOR_DVID;
155 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
156 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
157 		return DRM_MODE_SUBCONNECTOR_HDMIA;
158 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159 	default:
160 		return DRM_MODE_SUBCONNECTOR_Unknown;
161 	}
162 }
163 
164 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165 {
166 	struct dc_link *link = aconnector->dc_link;
167 	struct drm_connector *connector = &aconnector->base;
168 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169 
170 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
171 		return;
172 
173 	if (aconnector->dc_sink)
174 		subconnector = get_subconnector_type(link);
175 
176 	drm_object_property_set_value(&connector->base,
177 			connector->dev->mode_config.dp_subconnector_property,
178 			subconnector);
179 }
180 
181 /*
182  * Initializes drm_device display-related structures, based on the information
183  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
184  * drm_encoder and drm_mode_config.
185  *
186  * Returns 0 on success
187  */
188 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
189 /* Removes and deallocates the DRM structures created by the above function */
190 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191 
192 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193 				struct drm_plane *plane,
194 				unsigned long possible_crtcs,
195 				const struct dc_plane_cap *plane_cap);
196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197 			       struct drm_plane *plane,
198 			       uint32_t link_index);
199 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
201 				    uint32_t link_index,
202 				    struct amdgpu_encoder *amdgpu_encoder);
203 static int amdgpu_dm_encoder_init(struct drm_device *dev,
204 				  struct amdgpu_encoder *aencoder,
205 				  uint32_t link_index);
206 
207 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208 
209 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210 
211 static int amdgpu_dm_atomic_check(struct drm_device *dev,
212 				  struct drm_atomic_state *state);
213 
214 static void handle_cursor_update(struct drm_plane *plane,
215 				 struct drm_plane_state *old_plane_state);
216 
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 
220 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
221 static void handle_hpd_rx_irq(void *param);
222 
223 static bool
224 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
225 				 struct drm_crtc_state *new_crtc_state);
226 /*
227  * dm_vblank_get_counter
228  *
229  * @brief
230  * Get counter for number of vertical blanks
231  *
232  * @param
233  * struct amdgpu_device *adev - [in] desired amdgpu device
234  * int crtc - [in] which CRTC to get the counter from
235  *
236  * @return
237  * Counter for vertical blanks
238  */
239 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
240 {
241 	if (crtc >= adev->mode_info.num_crtc)
242 		return 0;
243 	else {
244 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
245 
246 		if (acrtc->dm_irq_params.stream == NULL) {
247 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
248 				  crtc);
249 			return 0;
250 		}
251 
252 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
253 	}
254 }
255 
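/*
 * Return the current scanout position and vblank start/end for @crtc,
 * packed back into the legacy register format expected by the base driver.
 */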
256 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
257 				  u32 *vbl, u32 *position)
258 {
259 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
260 
261 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262 		return -EINVAL;
263 	else {
264 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
265 
266 		if (acrtc->dm_irq_params.stream == NULL) {
267 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
268 				  crtc);
269 			return 0;
270 		}
271 
272 		/*
273 		 * TODO rework base driver to use values directly.
274 		 * for now parse it back into reg-format
275 		 */
276 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
277 					 &v_blank_start,
278 					 &v_blank_end,
279 					 &h_position,
280 					 &v_position);
281 
282 		*position = v_position | (h_position << 16);
283 		*vbl = v_blank_start | (v_blank_end << 16);
284 	}
285 
286 	return 0;
287 }
288 
289 static bool dm_is_idle(void *handle)
290 {
291 	/* XXX todo */
292 	return true;
293 }
294 
295 static int dm_wait_for_idle(void *handle)
296 {
297 	/* XXX todo */
298 	return 0;
299 }
300 
301 static bool dm_check_soft_reset(void *handle)
302 {
303 	return false;
304 }
305 
306 static int dm_soft_reset(void *handle)
307 {
308 	/* XXX todo */
309 	return 0;
310 }
311 
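/*
 * Look up the amdgpu_crtc driven by the given OTG instance. Falls back to
 * CRTC 0 (with a warning) if the instance is -1, or NULL if no CRTC matches.
 */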
312 static struct amdgpu_crtc *
313 get_crtc_by_otg_inst(struct amdgpu_device *adev,
314 		     int otg_inst)
315 {
316 	struct drm_device *dev = adev_to_drm(adev);
317 	struct drm_crtc *crtc;
318 	struct amdgpu_crtc *amdgpu_crtc;
319 
320 	if (WARN_ON(otg_inst == -1))
321 		return adev->mode_info.crtcs[0];
322 
323 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324 		amdgpu_crtc = to_amdgpu_crtc(crtc);
325 
326 		if (amdgpu_crtc->otg_inst == otg_inst)
327 			return amdgpu_crtc;
328 	}
329 
330 	return NULL;
331 }
332 
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334 {
335 	return acrtc->dm_irq_params.freesync_config.state ==
336 		       VRR_STATE_ACTIVE_VARIABLE ||
337 	       acrtc->dm_irq_params.freesync_config.state ==
338 		       VRR_STATE_ACTIVE_FIXED;
339 }
340 
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342 {
343 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345 }
346 
347 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
348 					      struct dm_crtc_state *new_state)
349 {
350 	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
351 		return true;
352 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
353 		return true;
354 	else
355 		return false;
356 }
357 
358 /**
359  * dm_pflip_high_irq() - Handle pageflip interrupt
360  * @interrupt_params: common_irq_params for the CRTC whose pageflip completed
361  *
362  * Handles the pageflip interrupt by notifying all interested parties
363  * that the pageflip has been completed.
364  */
365 static void dm_pflip_high_irq(void *interrupt_params)
366 {
367 	struct amdgpu_crtc *amdgpu_crtc;
368 	struct common_irq_params *irq_params = interrupt_params;
369 	struct amdgpu_device *adev = irq_params->adev;
370 	unsigned long flags;
371 	struct drm_pending_vblank_event *e;
372 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
373 	bool vrr_active;
374 
375 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376 
377 	/* IRQ could occur when in initial stage */
378 	/* TODO work and BO cleanup */
379 	if (amdgpu_crtc == NULL) {
380 		DC_LOG_PFLIP("CRTC is null, returning.\n");
381 		return;
382 	}
383 
384 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385 
386 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
387 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
388 						 amdgpu_crtc->pflip_status,
389 						 AMDGPU_FLIP_SUBMITTED,
390 						 amdgpu_crtc->crtc_id,
391 						 amdgpu_crtc);
392 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
393 		return;
394 	}
395 
396 	/* page flip completed. */
397 	e = amdgpu_crtc->event;
398 	amdgpu_crtc->event = NULL;
399 
400 	WARN_ON(!e);
401 
402 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
403 
404 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
405 	if (!vrr_active ||
406 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407 				      &v_blank_end, &hpos, &vpos) ||
408 	    (vpos < v_blank_start)) {
409 		/* Update to correct count and vblank timestamp if racing with
410 		 * vblank irq. This also updates to the correct vblank timestamp
411 		 * even in VRR mode, as scanout is past the front-porch atm.
412 		 */
413 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
414 
415 		/* Wake up userspace by sending the pageflip event with proper
416 		 * count and timestamp of vblank of flip completion.
417 		 */
418 		if (e) {
419 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
420 
421 			/* Event sent, so done with vblank for this flip */
422 			drm_crtc_vblank_put(&amdgpu_crtc->base);
423 		}
424 	} else if (e) {
425 		/* VRR active and inside front-porch: vblank count and
426 		 * timestamp for pageflip event will only be up to date after
427 		 * drm_crtc_handle_vblank() has been executed from late vblank
428 		 * irq handler after start of back-porch (vline 0). We queue the
429 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
430 		 * updated timestamp and count, once it runs after us.
431 		 *
432 		 * We need to open-code this instead of using the helper
433 		 * drm_crtc_arm_vblank_event(), as that helper would
434 		 * call drm_crtc_accurate_vblank_count(), which we must
435 		 * not call in VRR mode while we are in front-porch!
436 		 */
437 
438 		/* sequence will be replaced by real count during send-out. */
439 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440 		e->pipe = amdgpu_crtc->crtc_id;
441 
442 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
443 		e = NULL;
444 	}
445 
446 	/* Keep track of vblank of this flip for flip throttling. We use the
447 	 * cooked hw counter, as that one is incremented at the start of this vblank
448 	 * of pageflip completion, so last_flip_vblank is the forbidden count
449 	 * for queueing new pageflips if vsync + VRR is enabled.
450 	 */
451 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
452 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
453 
454 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
456 
457 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
459 		     vrr_active, (int) !e);
460 }
461 
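/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: common_irq_params for the CRTC that generated the irq
 *
 * Tracks the measured refresh rate and, in VRR mode, performs the core
 * vblank handling (deferred until after the front-porch) as well as BTR
 * processing on pre-DCE12 ASICs.
 */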
462 static void dm_vupdate_high_irq(void *interrupt_params)
463 {
464 	struct common_irq_params *irq_params = interrupt_params;
465 	struct amdgpu_device *adev = irq_params->adev;
466 	struct amdgpu_crtc *acrtc;
467 	struct drm_device *drm_dev;
468 	struct drm_vblank_crtc *vblank;
469 	ktime_t frame_duration_ns, previous_timestamp;
470 	unsigned long flags;
471 	int vrr_active;
472 
473 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474 
475 	if (acrtc) {
476 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477 		drm_dev = acrtc->base.dev;
478 		vblank = &drm_dev->vblank[acrtc->base.index];
479 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480 		frame_duration_ns = vblank->time - previous_timestamp;
481 
482 		if (frame_duration_ns > 0) {
483 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
484 						frame_duration_ns,
485 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
487 		}
488 
489 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
490 			      acrtc->crtc_id,
491 			      vrr_active);
492 
493 		/* Core vblank handling is done here after end of front-porch in
494 		 * vrr mode, as vblank timestamping will only give valid results
495 		 * now that scanout is past the front-porch. This will also deliver
496 		 * page-flip completion events that have been queued to us
497 		 * if a pageflip happened inside front-porch.
498 		 */
499 		if (vrr_active) {
500 			drm_crtc_handle_vblank(&acrtc->base);
501 
502 			/* BTR processing for pre-DCE12 ASICs */
503 			if (acrtc->dm_irq_params.stream &&
504 			    adev->family < AMDGPU_FAMILY_AI) {
505 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506 				mod_freesync_handle_v_update(
507 				    adev->dm.freesync_module,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params);
510 
511 				dc_stream_adjust_vmin_vmax(
512 				    adev->dm.dc,
513 				    acrtc->dm_irq_params.stream,
514 				    &acrtc->dm_irq_params.vrr_params.adjust);
515 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
516 			}
517 		}
518 	}
519 }
520 
521 /**
522  * dm_crtc_high_irq() - Handles CRTC interrupt
523  * @interrupt_params: used for determining the CRTC instance
524  *
525  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
526  * event handler.
527  */
528 static void dm_crtc_high_irq(void *interrupt_params)
529 {
530 	struct common_irq_params *irq_params = interrupt_params;
531 	struct amdgpu_device *adev = irq_params->adev;
532 	struct amdgpu_crtc *acrtc;
533 	unsigned long flags;
534 	int vrr_active;
535 
536 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
537 	if (!acrtc)
538 		return;
539 
540 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
541 
542 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543 		      vrr_active, acrtc->dm_irq_params.active_planes);
544 
545 	/*
546 	 * Core vblank handling at start of front-porch is only possible
547 	 * in non-vrr mode, as only then does vblank timestamping give
548 	 * valid results while done in front-porch. Otherwise defer it
549 	 * to dm_vupdate_high_irq after end of front-porch.
550 	 */
551 	if (!vrr_active)
552 		drm_crtc_handle_vblank(&acrtc->base);
553 
554 	/*
555 	 * The following must happen at start of vblank, for CRC
556 	 * computation and below-the-range (BTR) support in vrr mode.
557 	 */
558 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
559 
560 	/* BTR updates need to happen before VUPDATE on Vega and above. */
561 	if (adev->family < AMDGPU_FAMILY_AI)
562 		return;
563 
564 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
565 
566 	if (acrtc->dm_irq_params.stream &&
567 	    acrtc->dm_irq_params.vrr_params.supported &&
568 	    acrtc->dm_irq_params.freesync_config.state ==
569 		    VRR_STATE_ACTIVE_VARIABLE) {
570 		mod_freesync_handle_v_update(adev->dm.freesync_module,
571 					     acrtc->dm_irq_params.stream,
572 					     &acrtc->dm_irq_params.vrr_params);
573 
574 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575 					   &acrtc->dm_irq_params.vrr_params.adjust);
576 	}
577 
578 	/*
579 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
580 	 * In that case, pageflip completion interrupts won't fire and pageflip
581 	 * completion events won't get delivered. Prevent this by sending
582 	 * pending pageflip events from here if a flip is still pending.
583 	 *
584 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
585 	 * avoid race conditions between flip programming and completion,
586 	 * which could cause too early flip completion events.
587 	 */
588 	if (adev->family >= AMDGPU_FAMILY_RV &&
589 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590 	    acrtc->dm_irq_params.active_planes == 0) {
591 		if (acrtc->event) {
592 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
593 			acrtc->event = NULL;
594 			drm_crtc_vblank_put(&acrtc->base);
595 		}
596 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
597 	}
598 
599 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 }
601 
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
603 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
604 /**
605  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606  * DCN generation ASICs
607  * @interrupt_params: interrupt parameters
608  *
609  * Used to set crc window/read out crc value at vertical line 0 position
610  */
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
612 {
613 	struct common_irq_params *irq_params = interrupt_params;
614 	struct amdgpu_device *adev = irq_params->adev;
615 	struct amdgpu_crtc *acrtc;
616 
617 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618 
619 	if (!acrtc)
620 		return;
621 
622 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 }
624 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
625 
626 /**
627  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
628  * @adev: amdgpu_device pointer
629  * @notify: dmub notification structure
630  *
631  * DMUB AUX or SET_CONFIG command completion processing callback.
632  * Copies the dmub notification to DM, where it is read by the AUX-command
633  * issuing thread, and signals the event to wake that thread up.
634  */
635 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
636 {
637 	if (adev->dm.dmub_notify)
638 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
639 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
640 		complete(&adev->dm.dmub_aux_transfer_done);
641 }
642 
643 /**
644  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
645  * @adev: amdgpu_device pointer
646  * @notify: dmub notification structure
647  *
648  * DMUB HPD interrupt processing callback. Gets the display connector through
649  * the link index and calls the helper to do the processing.
650  */
651 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
652 {
653 	struct amdgpu_dm_connector *aconnector;
654 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
655 	struct drm_connector *connector;
656 	struct drm_connector_list_iter iter;
657 	struct dc_link *link;
658 	uint8_t link_index = 0;
659 	struct drm_device *dev = adev->dm.ddev;
660 
661 	if (adev == NULL)
662 		return;
663 
664 	if (notify == NULL) {
665 		DRM_ERROR("DMUB HPD callback notification was NULL");
666 		return;
667 	}
668 
669 	if (notify->link_index >= adev->dm.dc->link_count) {
670 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
671 		return;
672 	}
673 
674 	link_index = notify->link_index;
675 	link = adev->dm.dc->links[link_index];
676 
677 	drm_connector_list_iter_begin(dev, &iter);
678 	drm_for_each_connector_iter(connector, &iter) {
679 		aconnector = to_amdgpu_dm_connector(connector);
680 		if (link && aconnector->dc_link == link) {
681 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
682 			hpd_aconnector = aconnector;
683 			break;
684 		}
685 	}
686 	drm_connector_list_iter_end(&iter);
687 
688 	if (hpd_aconnector) {
689 		if (notify->type == DMUB_NOTIFICATION_HPD)
690 			handle_hpd_irq_helper(hpd_aconnector);
691 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
692 			handle_hpd_rx_irq(hpd_aconnector);
693 	}
694 }
695 
696 /**
697  * register_dmub_notify_callback - Sets callback for DMUB notify
698  * @adev: amdgpu_device pointer
699  * @type: Type of dmub notification
700  * @callback: Dmub interrupt callback function
701  * @dmub_int_thread_offload: offload indicator
702  *
703  * API to register a dmub callback handler for a dmub notification
704  * Also sets an indicator for whether callback processing is to be offloaded
705  * to the dmub interrupt handling thread.
706  * Return: true if successfully registered, false if there is existing registration
707  */
708 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
709 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
710 {
711 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
712 		adev->dm.dmub_callback[type] = callback;
713 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
714 	} else
715 		return false;
716 
717 	return true;
718 }
719 
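/*
 * Deferred HPD work: run the registered DMUB notification callback for the
 * notification that was queued from the outbox interrupt handler.
 */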
720 static void dm_handle_hpd_work(struct work_struct *work)
721 {
722 	struct dmub_hpd_work *dmub_hpd_wrk;
723 
724 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
725 
726 	if (!dmub_hpd_wrk->dmub_notify) {
727 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
728 		return;
729 	}
730 
731 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
732 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
733 		dmub_hpd_wrk->dmub_notify);
734 	}
735 
736 	kfree(dmub_hpd_wrk->dmub_notify);
737 	kfree(dmub_hpd_wrk);
738 
739 }
740 
741 #define DMUB_TRACE_MAX_READ 64
742 /**
743  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
744  * @interrupt_params: used for determining the Outbox instance
745  *
746  * Handles the Outbox interrupt by processing pending DMUB notifications and
747  * draining the DMUB trace buffer.
748  */
749 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
750 {
751 	struct dmub_notification notify;
752 	struct common_irq_params *irq_params = interrupt_params;
753 	struct amdgpu_device *adev = irq_params->adev;
754 	struct amdgpu_display_manager *dm = &adev->dm;
755 	struct dmcub_trace_buf_entry entry = { 0 };
756 	uint32_t count = 0;
757 	struct dmub_hpd_work *dmub_hpd_wrk;
758 	struct dc_link *plink = NULL;
759 
760 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
761 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
762 
763 		do {
764 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
765 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
766 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
767 				continue;
768 			}
769 			if (!dm->dmub_callback[notify.type]) {
770 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
771 				continue;
772 			}
773 			if (dm->dmub_thread_offload[notify.type] == true) {
774 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
775 				if (!dmub_hpd_wrk) {
776 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
777 					return;
778 				}
779 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
780 				if (!dmub_hpd_wrk->dmub_notify) {
781 					kfree(dmub_hpd_wrk);
782 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
783 					return;
784 				}
785 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
786 				if (dmub_hpd_wrk->dmub_notify)
787 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
788 				dmub_hpd_wrk->adev = adev;
789 				if (notify.type == DMUB_NOTIFICATION_HPD) {
790 					plink = adev->dm.dc->links[notify.link_index];
791 					if (plink) {
792 						plink->hpd_status =
793 							notify.hpd_status ==
794 							DP_HPD_PLUG ? true : false;
795 					}
796 				}
797 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
798 			} else {
799 				dm->dmub_callback[notify.type](adev, &notify);
800 			}
801 		} while (notify.pending_notification);
802 	}
803 
804 
805 	do {
806 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
807 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
808 							entry.param0, entry.param1);
809 
810 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
811 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
812 		} else
813 			break;
814 
815 		count++;
816 
817 	} while (count <= DMUB_TRACE_MAX_READ);
818 
819 	if (count > DMUB_TRACE_MAX_READ)
820 		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
821 }
822 #endif /* CONFIG_DRM_AMD_DC_DCN */
823 
824 static int dm_set_clockgating_state(void *handle,
825 		  enum amd_clockgating_state state)
826 {
827 	return 0;
828 }
829 
830 static int dm_set_powergating_state(void *handle,
831 		  enum amd_powergating_state state)
832 {
833 	return 0;
834 }
835 
836 /* Prototypes of private functions */
837 static int dm_early_init(void *handle);
838 
839 /* Allocate memory for FBC compressed data  */
840 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
841 {
842 	struct drm_device *dev = connector->dev;
843 	struct amdgpu_device *adev = drm_to_adev(dev);
844 	struct dm_compressor_info *compressor = &adev->dm.compressor;
845 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
846 	struct drm_display_mode *mode;
847 	unsigned long max_size = 0;
848 
849 	if (adev->dm.dc->fbc_compressor == NULL)
850 		return;
851 
852 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
853 		return;
854 
855 	if (compressor->bo_ptr)
856 		return;
857 
858 
859 	list_for_each_entry(mode, &connector->modes, head) {
860 		if (max_size < mode->htotal * mode->vtotal)
861 			max_size = mode->htotal * mode->vtotal;
862 	}
863 
864 	if (max_size) {
865 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
866 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
867 			    &compressor->gpu_addr, &compressor->cpu_addr);
868 
869 		if (r)
870 			DRM_ERROR("DM: Failed to initialize FBC\n");
871 		else {
872 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
873 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
874 		}
875 
876 	}
877 
878 }
879 
880 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
881 					  int pipe, bool *enabled,
882 					  unsigned char *buf, int max_bytes)
883 {
884 	struct drm_device *dev = dev_get_drvdata(kdev);
885 	struct amdgpu_device *adev = drm_to_adev(dev);
886 	struct drm_connector *connector;
887 	struct drm_connector_list_iter conn_iter;
888 	struct amdgpu_dm_connector *aconnector;
889 	int ret = 0;
890 
891 	*enabled = false;
892 
893 	mutex_lock(&adev->dm.audio_lock);
894 
895 	drm_connector_list_iter_begin(dev, &conn_iter);
896 	drm_for_each_connector_iter(connector, &conn_iter) {
897 		aconnector = to_amdgpu_dm_connector(connector);
898 		if (aconnector->audio_inst != port)
899 			continue;
900 
901 		*enabled = true;
902 		ret = drm_eld_size(connector->eld);
903 		memcpy(buf, connector->eld, min(max_bytes, ret));
904 
905 		break;
906 	}
907 	drm_connector_list_iter_end(&conn_iter);
908 
909 	mutex_unlock(&adev->dm.audio_lock);
910 
911 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
912 
913 	return ret;
914 }
915 
916 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
917 	.get_eld = amdgpu_dm_audio_component_get_eld,
918 };
919 
920 static int amdgpu_dm_audio_component_bind(struct device *kdev,
921 				       struct device *hda_kdev, void *data)
922 {
923 	struct drm_device *dev = dev_get_drvdata(kdev);
924 	struct amdgpu_device *adev = drm_to_adev(dev);
925 	struct drm_audio_component *acomp = data;
926 
927 	acomp->ops = &amdgpu_dm_audio_component_ops;
928 	acomp->dev = kdev;
929 	adev->dm.audio_component = acomp;
930 
931 	return 0;
932 }
933 
934 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
935 					  struct device *hda_kdev, void *data)
936 {
937 	struct drm_device *dev = dev_get_drvdata(kdev);
938 	struct amdgpu_device *adev = drm_to_adev(dev);
939 	struct drm_audio_component *acomp = data;
940 
941 	acomp->ops = NULL;
942 	acomp->dev = NULL;
943 	adev->dm.audio_component = NULL;
944 }
945 
946 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
947 	.bind	= amdgpu_dm_audio_component_bind,
948 	.unbind	= amdgpu_dm_audio_component_unbind,
949 };
950 
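/*
 * Enable audio support: initialize one audio pin per DC audio endpoint and
 * register the component that lets the HDA driver query ELDs.
 */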
951 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
952 {
953 	int i, ret;
954 
955 	if (!amdgpu_audio)
956 		return 0;
957 
958 	adev->mode_info.audio.enabled = true;
959 
960 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
961 
962 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
963 		adev->mode_info.audio.pin[i].channels = -1;
964 		adev->mode_info.audio.pin[i].rate = -1;
965 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
966 		adev->mode_info.audio.pin[i].status_bits = 0;
967 		adev->mode_info.audio.pin[i].category_code = 0;
968 		adev->mode_info.audio.pin[i].connected = false;
969 		adev->mode_info.audio.pin[i].id =
970 			adev->dm.dc->res_pool->audios[i]->inst;
971 		adev->mode_info.audio.pin[i].offset = 0;
972 	}
973 
974 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
975 	if (ret < 0)
976 		return ret;
977 
978 	adev->dm.audio_registered = true;
979 
980 	return 0;
981 }
982 
983 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
984 {
985 	if (!amdgpu_audio)
986 		return;
987 
988 	if (!adev->mode_info.audio.enabled)
989 		return;
990 
991 	if (adev->dm.audio_registered) {
992 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
993 		adev->dm.audio_registered = false;
994 	}
995 
996 	/* TODO: Disable audio? */
997 
998 	adev->mode_info.audio.enabled = false;
999 }
1000 
1001 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1002 {
1003 	struct drm_audio_component *acomp = adev->dm.audio_component;
1004 
1005 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1006 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1007 
1008 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1009 						 pin, -1);
1010 	}
1011 }
1012 
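/*
 * Copy the DMUB firmware and VBIOS into their framebuffer windows, program
 * the hardware parameters and bring up the DMUB service. Returns 0 when the
 * ASIC has no DMUB support, or a negative error code on failure.
 */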
1013 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1014 {
1015 	const struct dmcub_firmware_header_v1_0 *hdr;
1016 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1017 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1018 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1019 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1020 	struct abm *abm = adev->dm.dc->res_pool->abm;
1021 	struct dmub_srv_hw_params hw_params;
1022 	enum dmub_status status;
1023 	const unsigned char *fw_inst_const, *fw_bss_data;
1024 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1025 	bool has_hw_support;
1026 	struct dc *dc = adev->dm.dc;
1027 
1028 	if (!dmub_srv)
1029 		/* DMUB isn't supported on the ASIC. */
1030 		return 0;
1031 
1032 	if (!fb_info) {
1033 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1034 		return -EINVAL;
1035 	}
1036 
1037 	if (!dmub_fw) {
1038 		/* Firmware required for DMUB support. */
1039 		DRM_ERROR("No firmware provided for DMUB.\n");
1040 		return -EINVAL;
1041 	}
1042 
1043 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1044 	if (status != DMUB_STATUS_OK) {
1045 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1046 		return -EINVAL;
1047 	}
1048 
1049 	if (!has_hw_support) {
1050 		DRM_INFO("DMUB unsupported on ASIC\n");
1051 		return 0;
1052 	}
1053 
1054 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1055 
1056 	fw_inst_const = dmub_fw->data +
1057 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1058 			PSP_HEADER_BYTES;
1059 
1060 	fw_bss_data = dmub_fw->data +
1061 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1062 		      le32_to_cpu(hdr->inst_const_bytes);
1063 
1064 	/* Copy firmware and bios info into FB memory. */
1065 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1066 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1067 
1068 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1069 
1070 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1071 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1072 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1073 	 * will be done by dm_dmub_hw_init
1074 	 */
1075 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1076 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1077 				fw_inst_const_size);
1078 	}
1079 
1080 	if (fw_bss_data_size)
1081 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1082 		       fw_bss_data, fw_bss_data_size);
1083 
1084 	/* Copy firmware bios info into FB memory. */
1085 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1086 	       adev->bios_size);
1087 
1088 	/* Reset regions that need to be reset. */
1089 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1090 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1091 
1092 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1093 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1094 
1095 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1096 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1097 
1098 	/* Initialize hardware. */
1099 	memset(&hw_params, 0, sizeof(hw_params));
1100 	hw_params.fb_base = adev->gmc.fb_start;
1101 	hw_params.fb_offset = adev->gmc.aper_base;
1102 
1103 	/* backdoor load firmware and trigger dmub running */
1104 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1105 		hw_params.load_inst_const = true;
1106 
1107 	if (dmcu)
1108 		hw_params.psp_version = dmcu->psp_version;
1109 
1110 	for (i = 0; i < fb_info->num_fb; ++i)
1111 		hw_params.fb[i] = &fb_info->fb[i];
1112 
1113 	switch (adev->asic_type) {
1114 	case CHIP_YELLOW_CARP:
1115 		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1116 			hw_params.dpia_supported = true;
1117 #if defined(CONFIG_DRM_AMD_DC_DCN)
1118 			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1119 #endif
1120 		}
1121 		break;
1122 	default:
1123 		break;
1124 	}
1125 
1126 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1127 	if (status != DMUB_STATUS_OK) {
1128 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1129 		return -EINVAL;
1130 	}
1131 
1132 	/* Wait for firmware load to finish. */
1133 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1134 	if (status != DMUB_STATUS_OK)
1135 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1136 
1137 	/* Init DMCU and ABM if available. */
1138 	if (dmcu && abm) {
1139 		dmcu->funcs->dmcu_init(dmcu);
1140 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1141 	}
1142 
1143 	if (!adev->dm.dc->ctx->dmub_srv)
1144 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1145 	if (!adev->dm.dc->ctx->dmub_srv) {
1146 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1147 		return -ENOMEM;
1148 	}
1149 
1150 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1151 		 adev->dm.dmcub_fw_version);
1152 
1153 	return 0;
1154 }
1155 
1156 #if defined(CONFIG_DRM_AMD_DC_DCN)
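/*
 * Build a dc_phy_addr_space_config from the GMC state: system aperture,
 * AGP window, framebuffer base/offset and the GART page table location.
 */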
1157 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1158 {
1159 	uint64_t pt_base;
1160 	uint32_t logical_addr_low;
1161 	uint32_t logical_addr_high;
1162 	uint32_t agp_base, agp_bot, agp_top;
1163 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1164 
1165 	memset(pa_config, 0, sizeof(*pa_config));
1166 
1167 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1168 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1169 
1170 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1171 		/*
1172 		 * Raven2 has a HW issue that it is unable to use the vram which
1173 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1174 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
1175 		 * increase the system aperture high address (add 1) to get rid
1176 		 * of the VM fault and hardware hang.
1177 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1178 	else
1179 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1180 
1181 	agp_base = 0;
1182 	agp_bot = adev->gmc.agp_start >> 24;
1183 	agp_top = adev->gmc.agp_end >> 24;
1184 
1185 
1186 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1187 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1188 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1189 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1190 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1191 	page_table_base.low_part = lower_32_bits(pt_base);
1192 
1193 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1194 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1195 
1196 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1197 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1198 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1199 
1200 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1201 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1202 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1203 
1204 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1205 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1206 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1207 
1208 	pa_config->is_hvm_enabled = 0;
1209 
1210 }
1211 #endif
1212 #if defined(CONFIG_DRM_AMD_DC_DCN)
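/*
 * Deferred vblank enable/disable handling: track the number of CRTCs with
 * vblank interrupts enabled, allow idle optimizations (MALL) only when that
 * count is zero, and toggle PSR to match the OS vblank requirements.
 */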
1213 static void vblank_control_worker(struct work_struct *work)
1214 {
1215 	struct vblank_control_work *vblank_work =
1216 		container_of(work, struct vblank_control_work, work);
1217 	struct amdgpu_display_manager *dm = vblank_work->dm;
1218 
1219 	mutex_lock(&dm->dc_lock);
1220 
1221 	if (vblank_work->enable)
1222 		dm->active_vblank_irq_count++;
1223 	else if (dm->active_vblank_irq_count)
1224 		dm->active_vblank_irq_count--;
1225 
1226 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1227 
1228 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1229 
1230 	/* Control PSR based on vblank requirements from OS */
1231 	if (vblank_work->stream && vblank_work->stream->link) {
1232 		if (vblank_work->enable) {
1233 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1234 				amdgpu_dm_psr_disable(vblank_work->stream);
1235 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1236 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1237 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1238 			amdgpu_dm_psr_enable(vblank_work->stream);
1239 		}
1240 	}
1241 
1242 	mutex_unlock(&dm->dc_lock);
1243 
1244 	dc_stream_release(vblank_work->stream);
1245 
1246 	kfree(vblank_work);
1247 }
1248 
1249 #endif
1250 
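/*
 * Offloaded HPD RX irq handling: re-detect the sink and, unless a GPU reset
 * is in progress, service automated-test requests or recover from link loss.
 */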
1251 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1252 {
1253 	struct hpd_rx_irq_offload_work *offload_work;
1254 	struct amdgpu_dm_connector *aconnector;
1255 	struct dc_link *dc_link;
1256 	struct amdgpu_device *adev;
1257 	enum dc_connection_type new_connection_type = dc_connection_none;
1258 	unsigned long flags;
1259 
1260 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1261 	aconnector = offload_work->offload_wq->aconnector;
1262 
1263 	if (!aconnector) {
1264 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1265 		goto skip;
1266 	}
1267 
1268 	adev = drm_to_adev(aconnector->base.dev);
1269 	dc_link = aconnector->dc_link;
1270 
1271 	mutex_lock(&aconnector->hpd_lock);
1272 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1273 		DRM_ERROR("KMS: Failed to detect connector\n");
1274 	mutex_unlock(&aconnector->hpd_lock);
1275 
1276 	if (new_connection_type == dc_connection_none)
1277 		goto skip;
1278 
1279 	if (amdgpu_in_reset(adev))
1280 		goto skip;
1281 
1282 	mutex_lock(&adev->dm.dc_lock);
1283 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1284 		dc_link_dp_handle_automated_test(dc_link);
1285 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1286 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1287 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1288 		dc_link_dp_handle_link_loss(dc_link);
1289 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1290 		offload_work->offload_wq->is_handling_link_loss = false;
1291 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1292 	}
1293 	mutex_unlock(&adev->dm.dc_lock);
1294 
1295 skip:
1296 	kfree(offload_work);
1297 
1298 }
1299 
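/* Allocate one single-threaded offload workqueue per link for HPD RX irqs. */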
1300 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1301 {
1302 	int max_caps = dc->caps.max_links;
1303 	int i = 0;
1304 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1305 
1306 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1307 
1308 	if (!hpd_rx_offload_wq)
1309 		return NULL;
1310 
1311 
1312 	for (i = 0; i < max_caps; i++) {
1313 		hpd_rx_offload_wq[i].wq =
1314 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1315 
1316 		if (hpd_rx_offload_wq[i].wq == NULL) {
1317 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1318 			return NULL;
1319 		}
1320 
1321 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1322 	}
1323 
1324 	return hpd_rx_offload_wq;
1325 }
1326 
1327 struct amdgpu_stutter_quirk {
1328 	u16 chip_vendor;
1329 	u16 chip_device;
1330 	u16 subsys_vendor;
1331 	u16 subsys_device;
1332 	u8 revision;
1333 };
1334 
1335 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1336 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1337 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1338 	{ 0, 0, 0, 0, 0 },
1339 };
1340 
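/*
 * Check the PCI IDs of this board against the quirk list of systems where
 * memory stutter must be disabled.
 */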
1341 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1342 {
1343 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1344 
1345 	while (p && p->chip_device != 0) {
1346 		if (pdev->vendor == p->chip_vendor &&
1347 		    pdev->device == p->chip_device &&
1348 		    pdev->subsystem_vendor == p->subsys_vendor &&
1349 		    pdev->subsystem_device == p->subsys_device &&
1350 		    pdev->revision == p->revision) {
1351 			return true;
1352 		}
1353 		++p;
1354 	}
1355 	return false;
1356 }
1357 
1358 static int amdgpu_dm_init(struct amdgpu_device *adev)
1359 {
1360 	struct dc_init_data init_data;
1361 #ifdef CONFIG_DRM_AMD_DC_HDCP
1362 	struct dc_callback_init init_params;
1363 #endif
1364 	int r;
1365 
1366 	adev->dm.ddev = adev_to_drm(adev);
1367 	adev->dm.adev = adev;
1368 
1369 	/* Zero all the fields */
1370 	memset(&init_data, 0, sizeof(init_data));
1371 #ifdef CONFIG_DRM_AMD_DC_HDCP
1372 	memset(&init_params, 0, sizeof(init_params));
1373 #endif
1374 
1375 	mutex_init(&adev->dm.dc_lock);
1376 	mutex_init(&adev->dm.audio_lock);
1377 #if defined(CONFIG_DRM_AMD_DC_DCN)
1378 	spin_lock_init(&adev->dm.vblank_lock);
1379 #endif
1380 
1381 	if (amdgpu_dm_irq_init(adev)) {
1382 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1383 		goto error;
1384 	}
1385 
1386 	init_data.asic_id.chip_family = adev->family;
1387 
1388 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1389 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1390 	init_data.asic_id.chip_id = adev->pdev->device;
1391 
1392 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1393 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1394 	init_data.asic_id.atombios_base_address =
1395 		adev->mode_info.atom_context->bios;
1396 
1397 	init_data.driver = adev;
1398 
1399 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1400 
1401 	if (!adev->dm.cgs_device) {
1402 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1403 		goto error;
1404 	}
1405 
1406 	init_data.cgs_device = adev->dm.cgs_device;
1407 
1408 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1409 
1410 	switch (adev->asic_type) {
1411 	case CHIP_CARRIZO:
1412 	case CHIP_STONEY:
1413 		init_data.flags.gpu_vm_support = true;
1414 		break;
1415 	default:
1416 		switch (adev->ip_versions[DCE_HWIP][0]) {
1417 		case IP_VERSION(2, 1, 0):
1418 			init_data.flags.gpu_vm_support = true;
1419 			switch (adev->dm.dmcub_fw_version) {
1420 			case 0: /* development */
1421 			case 0x1: /* linux-firmware.git hash 6d9f399 */
1422 			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1423 				init_data.flags.disable_dmcu = false;
1424 				break;
1425 			default:
1426 				init_data.flags.disable_dmcu = true;
1427 			}
1428 			break;
1429 		case IP_VERSION(1, 0, 0):
1430 		case IP_VERSION(1, 0, 1):
1431 		case IP_VERSION(3, 0, 1):
1432 		case IP_VERSION(3, 1, 2):
1433 		case IP_VERSION(3, 1, 3):
1434 			init_data.flags.gpu_vm_support = true;
1435 			break;
1436 		case IP_VERSION(2, 0, 3):
1437 			init_data.flags.disable_dmcu = true;
1438 			break;
1439 		default:
1440 			break;
1441 		}
1442 		break;
1443 	}
1444 
1445 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1446 		init_data.flags.fbc_support = true;
1447 
1448 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1449 		init_data.flags.multi_mon_pp_mclk_switch = true;
1450 
1451 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1452 		init_data.flags.disable_fractional_pwm = true;
1453 
1454 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1455 		init_data.flags.edp_no_power_sequencing = true;
1456 
1457 	init_data.flags.power_down_display_on_boot = true;
1458 
1459 	INIT_LIST_HEAD(&adev->dm.da_list);
1460 	/* Display Core create. */
1461 	adev->dm.dc = dc_create(&init_data);
1462 
1463 	if (adev->dm.dc) {
1464 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1465 	} else {
1466 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1467 		goto error;
1468 	}
1469 
1470 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1471 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1472 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1473 	}
1474 
1475 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1476 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1477 	if (dm_should_disable_stutter(adev->pdev))
1478 		adev->dm.dc->debug.disable_stutter = true;
1479 
1480 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1481 		adev->dm.dc->debug.disable_stutter = true;
1482 
1483 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1484 		adev->dm.dc->debug.disable_dsc = true;
1485 
1486 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1487 		adev->dm.dc->debug.disable_clock_gate = true;
1488 
1489 	r = dm_dmub_hw_init(adev);
1490 	if (r) {
1491 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1492 		goto error;
1493 	}
1494 
1495 	dc_hardware_init(adev->dm.dc);
1496 
1497 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1498 	if (!adev->dm.hpd_rx_offload_wq) {
1499 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1500 		goto error;
1501 	}
1502 
1503 #if defined(CONFIG_DRM_AMD_DC_DCN)
1504 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1505 		struct dc_phy_addr_space_config pa_config;
1506 
1507 		mmhub_read_system_context(adev, &pa_config);
1508 
1509 		// Call the DC init_memory func
1510 		dc_setup_system_context(adev->dm.dc, &pa_config);
1511 	}
1512 #endif
1513 
1514 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1515 	if (!adev->dm.freesync_module) {
1516 		DRM_ERROR(
1517 		"amdgpu: failed to initialize freesync_module.\n");
1518 	} else
1519 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1520 				adev->dm.freesync_module);
1521 
1522 	amdgpu_dm_init_color_mod();
1523 
1524 #if defined(CONFIG_DRM_AMD_DC_DCN)
1525 	if (adev->dm.dc->caps.max_links > 0) {
1526 		adev->dm.vblank_control_workqueue =
1527 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1528 		if (!adev->dm.vblank_control_workqueue)
1529 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1530 	}
1531 #endif
1532 
1533 #ifdef CONFIG_DRM_AMD_DC_HDCP
1534 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1535 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1536 
1537 		if (!adev->dm.hdcp_workqueue)
1538 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1539 		else
1540 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1541 
1542 		dc_init_callbacks(adev->dm.dc, &init_params);
1543 	}
1544 #endif
1545 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1546 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1547 #endif
1548 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1549 		init_completion(&adev->dm.dmub_aux_transfer_done);
1550 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1551 		if (!adev->dm.dmub_notify) {
1552 			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1553 			goto error;
1554 		}
1555 
1556 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1557 		if (!adev->dm.delayed_hpd_wq) {
1558 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1559 			goto error;
1560 		}
1561 
1562 		amdgpu_dm_outbox_init(adev);
1563 #if defined(CONFIG_DRM_AMD_DC_DCN)
1564 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1565 			dmub_aux_setconfig_callback, false)) {
1566 			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1567 			goto error;
1568 		}
1569 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1570 			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1571 			goto error;
1572 		}
1573 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1574 			DRM_ERROR("amdgpu: failed to register dmub hpd_irq callback");
1575 			goto error;
1576 		}
1577 #endif /* CONFIG_DRM_AMD_DC_DCN */
1578 	}
1579 
1580 	if (amdgpu_dm_initialize_drm_device(adev)) {
1581 		DRM_ERROR(
1582 		"amdgpu: failed to initialize sw for display support.\n");
1583 		goto error;
1584 	}
1585 
1586 	/* create fake encoders for MST */
1587 	dm_dp_create_fake_mst_encoders(adev);
1588 
1589 	/* TODO: Add_display_info? */
1590 
1591 	/* TODO use dynamic cursor width */
1592 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1593 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1594 
1595 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1596 		DRM_ERROR(
1597 		"amdgpu: failed to initialize vblank support.\n");
1598 		goto error;
1599 	}
1600 
1601 
1602 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1603 
1604 	return 0;
1605 error:
1606 	amdgpu_dm_fini(adev);
1607 
1608 	return -EINVAL;
1609 }
1610 
1611 static int amdgpu_dm_early_fini(void *handle)
1612 {
1613 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1614 
1615 	amdgpu_dm_audio_fini(adev);
1616 
1617 	return 0;
1618 }
1619 
1620 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1621 {
1622 	int i;
1623 
1624 #if defined(CONFIG_DRM_AMD_DC_DCN)
1625 	if (adev->dm.vblank_control_workqueue) {
1626 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1627 		adev->dm.vblank_control_workqueue = NULL;
1628 	}
1629 #endif
1630 
1631 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1632 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1633 	}
1634 
1635 	amdgpu_dm_destroy_drm_device(&adev->dm);
1636 
1637 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1638 	if (adev->dm.crc_rd_wrk) {
1639 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1640 		kfree(adev->dm.crc_rd_wrk);
1641 		adev->dm.crc_rd_wrk = NULL;
1642 	}
1643 #endif
1644 #ifdef CONFIG_DRM_AMD_DC_HDCP
1645 	if (adev->dm.hdcp_workqueue) {
1646 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1647 		adev->dm.hdcp_workqueue = NULL;
1648 	}
1649 
1650 	if (adev->dm.dc)
1651 		dc_deinit_callbacks(adev->dm.dc);
1652 #endif
1653 
1654 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1655 
1656 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1657 		kfree(adev->dm.dmub_notify);
1658 		adev->dm.dmub_notify = NULL;
1659 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1660 		adev->dm.delayed_hpd_wq = NULL;
1661 	}
1662 
1663 	if (adev->dm.dmub_bo)
1664 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1665 				      &adev->dm.dmub_bo_gpu_addr,
1666 				      &adev->dm.dmub_bo_cpu_addr);
1667 
1668 	if (adev->dm.hpd_rx_offload_wq) {
1669 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1670 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1671 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1672 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1673 			}
1674 		}
1675 
1676 		kfree(adev->dm.hpd_rx_offload_wq);
1677 		adev->dm.hpd_rx_offload_wq = NULL;
1678 	}
1679 
1680 	/* DC Destroy TODO: Replace destroy DAL */
1681 	if (adev->dm.dc)
1682 		dc_destroy(&adev->dm.dc);
1683 	/*
1684 	 * TODO: pageflip, vblank interrupt
1685 	 *
1686 	 * amdgpu_dm_irq_fini(adev);
1687 	 */
1688 
1689 	if (adev->dm.cgs_device) {
1690 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1691 		adev->dm.cgs_device = NULL;
1692 	}
1693 	if (adev->dm.freesync_module) {
1694 		mod_freesync_destroy(adev->dm.freesync_module);
1695 		adev->dm.freesync_module = NULL;
1696 	}
1697 
1698 	mutex_destroy(&adev->dm.audio_lock);
1699 	mutex_destroy(&adev->dm.dc_lock);
1700 
1701 	return;
1702 }
1703 
1704 static int load_dmcu_fw(struct amdgpu_device *adev)
1705 {
1706 	const char *fw_name_dmcu = NULL;
1707 	int r;
1708 	const struct dmcu_firmware_header_v1_0 *hdr;
1709 
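	/*
	 * Only Navi12 and the Picasso/Raven2 revisions of Raven need a
	 * separately loaded DMCU firmware; every other supported ASIC
	 * returns early here.
	 */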
1710 	switch (adev->asic_type) {
1711 #if defined(CONFIG_DRM_AMD_DC_SI)
1712 	case CHIP_TAHITI:
1713 	case CHIP_PITCAIRN:
1714 	case CHIP_VERDE:
1715 	case CHIP_OLAND:
1716 #endif
1717 	case CHIP_BONAIRE:
1718 	case CHIP_HAWAII:
1719 	case CHIP_KAVERI:
1720 	case CHIP_KABINI:
1721 	case CHIP_MULLINS:
1722 	case CHIP_TONGA:
1723 	case CHIP_FIJI:
1724 	case CHIP_CARRIZO:
1725 	case CHIP_STONEY:
1726 	case CHIP_POLARIS11:
1727 	case CHIP_POLARIS10:
1728 	case CHIP_POLARIS12:
1729 	case CHIP_VEGAM:
1730 	case CHIP_VEGA10:
1731 	case CHIP_VEGA12:
1732 	case CHIP_VEGA20:
1733 		return 0;
1734 	case CHIP_NAVI12:
1735 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1736 		break;
1737 	case CHIP_RAVEN:
1738 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1739 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1740 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1741 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1742 		else
1743 			return 0;
1744 		break;
1745 	default:
1746 		switch (adev->ip_versions[DCE_HWIP][0]) {
1747 		case IP_VERSION(2, 0, 2):
1748 		case IP_VERSION(2, 0, 3):
1749 		case IP_VERSION(2, 0, 0):
1750 		case IP_VERSION(2, 1, 0):
1751 		case IP_VERSION(3, 0, 0):
1752 		case IP_VERSION(3, 0, 2):
1753 		case IP_VERSION(3, 0, 3):
1754 		case IP_VERSION(3, 0, 1):
1755 		case IP_VERSION(3, 1, 2):
1756 		case IP_VERSION(3, 1, 3):
1757 			return 0;
1758 		default:
1759 			break;
1760 		}
1761 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1762 		return -EINVAL;
1763 	}
1764 
1765 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1766 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1767 		return 0;
1768 	}
1769 
1770 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1771 	if (r == -ENOENT) {
1772 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1773 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1774 		adev->dm.fw_dmcu = NULL;
1775 		return 0;
1776 	}
1777 	if (r) {
1778 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1779 			fw_name_dmcu);
1780 		return r;
1781 	}
1782 
1783 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1784 	if (r) {
1785 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1786 			fw_name_dmcu);
1787 		release_firmware(adev->dm.fw_dmcu);
1788 		adev->dm.fw_dmcu = NULL;
1789 		return r;
1790 	}
1791 
1792 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1793 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1794 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1795 	adev->firmware.fw_size +=
1796 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1797 
1798 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1799 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1800 	adev->firmware.fw_size +=
1801 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1802 
1803 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1804 
1805 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1806 
1807 	return 0;
1808 }
1809 
1810 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1811 {
1812 	struct amdgpu_device *adev = ctx;
1813 
1814 	return dm_read_reg(adev->dm.dc->ctx, address);
1815 }
1816 
1817 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1818 				     uint32_t value)
1819 {
1820 	struct amdgpu_device *adev = ctx;
1821 
1822 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1823 }
1824 
1825 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1826 {
1827 	struct dmub_srv_create_params create_params;
1828 	struct dmub_srv_region_params region_params;
1829 	struct dmub_srv_region_info region_info;
1830 	struct dmub_srv_fb_params fb_params;
1831 	struct dmub_srv_fb_info *fb_info;
1832 	struct dmub_srv *dmub_srv;
1833 	const struct dmcub_firmware_header_v1_0 *hdr;
1834 	const char *fw_name_dmub;
1835 	enum dmub_asic dmub_asic;
1836 	enum dmub_status status;
1837 	int r;
1838 
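	/* Map the DCE hardware IP version to the matching DMUB ASIC enum and firmware image. */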
1839 	switch (adev->ip_versions[DCE_HWIP][0]) {
1840 	case IP_VERSION(2, 1, 0):
1841 		dmub_asic = DMUB_ASIC_DCN21;
1842 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1843 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1844 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1845 		break;
1846 	case IP_VERSION(3, 0, 0):
1847 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1848 			dmub_asic = DMUB_ASIC_DCN30;
1849 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1850 		} else {
1851 			dmub_asic = DMUB_ASIC_DCN30;
1852 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1853 		}
1854 		break;
1855 	case IP_VERSION(3, 0, 1):
1856 		dmub_asic = DMUB_ASIC_DCN301;
1857 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1858 		break;
1859 	case IP_VERSION(3, 0, 2):
1860 		dmub_asic = DMUB_ASIC_DCN302;
1861 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1862 		break;
1863 	case IP_VERSION(3, 0, 3):
1864 		dmub_asic = DMUB_ASIC_DCN303;
1865 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1866 		break;
1867 	case IP_VERSION(3, 1, 2):
1868 	case IP_VERSION(3, 1, 3):
1869 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1870 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1871 		break;
1872 
1873 	default:
1874 		/* ASIC doesn't support DMUB. */
1875 		return 0;
1876 	}
1877 
1878 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1879 	if (r) {
1880 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1881 		return 0;
1882 	}
1883 
1884 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1885 	if (r) {
1886 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1887 		return 0;
1888 	}
1889 
1890 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1891 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1892 
1893 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1894 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1895 			AMDGPU_UCODE_ID_DMCUB;
1896 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1897 			adev->dm.dmub_fw;
1898 		adev->firmware.fw_size +=
1899 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1900 
1901 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1902 			 adev->dm.dmcub_fw_version);
1903 	}
1904 
1905 
1906 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1907 	dmub_srv = adev->dm.dmub_srv;
1908 
1909 	if (!dmub_srv) {
1910 		DRM_ERROR("Failed to allocate DMUB service!\n");
1911 		return -ENOMEM;
1912 	}
1913 
1914 	memset(&create_params, 0, sizeof(create_params));
1915 	create_params.user_ctx = adev;
1916 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1917 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1918 	create_params.asic = dmub_asic;
1919 
1920 	/* Create the DMUB service. */
1921 	status = dmub_srv_create(dmub_srv, &create_params);
1922 	if (status != DMUB_STATUS_OK) {
1923 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1924 		return -EINVAL;
1925 	}
1926 
1927 	/* Calculate the size of all the regions for the DMUB service. */
1928 	memset(&region_params, 0, sizeof(region_params));
1929 
1930 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1931 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1932 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1933 	region_params.vbios_size = adev->bios_size;
1934 	region_params.fw_bss_data = region_params.bss_data_size ?
1935 		adev->dm.dmub_fw->data +
1936 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1937 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1938 	region_params.fw_inst_const =
1939 		adev->dm.dmub_fw->data +
1940 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1941 		PSP_HEADER_BYTES;
1942 
1943 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1944 					   &region_info);
1945 
1946 	if (status != DMUB_STATUS_OK) {
1947 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1948 		return -EINVAL;
1949 	}
1950 
1951 	/*
1952 	 * Allocate a framebuffer based on the total size of all the regions.
1953 	 * TODO: Move this into GART.
1954 	 */
1955 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1956 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1957 				    &adev->dm.dmub_bo_gpu_addr,
1958 				    &adev->dm.dmub_bo_cpu_addr);
1959 	if (r)
1960 		return r;
1961 
1962 	/* Rebase the regions on the framebuffer address. */
1963 	memset(&fb_params, 0, sizeof(fb_params));
1964 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1965 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1966 	fb_params.region_info = &region_info;
1967 
1968 	adev->dm.dmub_fb_info =
1969 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1970 	fb_info = adev->dm.dmub_fb_info;
1971 
1972 	if (!fb_info) {
1973 		DRM_ERROR(
1974 			"Failed to allocate framebuffer info for DMUB service!\n");
1975 		return -ENOMEM;
1976 	}
1977 
1978 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1979 	if (status != DMUB_STATUS_OK) {
1980 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1981 		return -EINVAL;
1982 	}
1983 
1984 	return 0;
1985 }
1986 
1987 static int dm_sw_init(void *handle)
1988 {
1989 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1990 	int r;
1991 
1992 	r = dm_dmub_sw_init(adev);
1993 	if (r)
1994 		return r;
1995 
1996 	return load_dmcu_fw(adev);
1997 }
1998 
1999 static int dm_sw_fini(void *handle)
2000 {
2001 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2002 
2003 	kfree(adev->dm.dmub_fb_info);
2004 	adev->dm.dmub_fb_info = NULL;
2005 
2006 	if (adev->dm.dmub_srv) {
2007 		dmub_srv_destroy(adev->dm.dmub_srv);
2008 		adev->dm.dmub_srv = NULL;
2009 	}
2010 
2011 	release_firmware(adev->dm.dmub_fw);
2012 	adev->dm.dmub_fw = NULL;
2013 
2014 	release_firmware(adev->dm.fw_dmcu);
2015 	adev->dm.fw_dmcu = NULL;
2016 
2017 	return 0;
2018 }
2019 
2020 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2021 {
2022 	struct amdgpu_dm_connector *aconnector;
2023 	struct drm_connector *connector;
2024 	struct drm_connector_list_iter iter;
2025 	int ret = 0;
2026 
2027 	drm_connector_list_iter_begin(dev, &iter);
2028 	drm_for_each_connector_iter(connector, &iter) {
2029 		aconnector = to_amdgpu_dm_connector(connector);
2030 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2031 		    aconnector->mst_mgr.aux) {
2032 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2033 					 aconnector,
2034 					 aconnector->base.base.id);
2035 
2036 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2037 			if (ret < 0) {
2038 				DRM_ERROR("DM_MST: Failed to start MST\n");
2039 				aconnector->dc_link->type =
2040 					dc_connection_single;
2041 				break;
2042 			}
2043 		}
2044 	}
2045 	drm_connector_list_iter_end(&iter);
2046 
2047 	return ret;
2048 }
2049 
2050 static int dm_late_init(void *handle)
2051 {
2052 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2053 
2054 	struct dmcu_iram_parameters params;
2055 	unsigned int linear_lut[16];
2056 	int i;
2057 	struct dmcu *dmcu = NULL;
2058 
2059 	dmcu = adev->dm.dc->res_pool->dmcu;
2060 
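	/* Build a 16-entry linear backlight LUT spanning 0..0xFFFF for ABM. */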
2061 	for (i = 0; i < 16; i++)
2062 		linear_lut[i] = 0xFFFF * i / 15;
2063 
2064 	params.set = 0;
2065 	params.backlight_ramping_override = false;
2066 	params.backlight_ramping_start = 0xCCCC;
2067 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2068 	params.backlight_lut_array_size = 16;
2069 	params.backlight_lut_array = linear_lut;
2070 
2071 	/* Min backlight level after ABM reduction; don't allow below 1%:
2072 	 * 0xFFFF x 0.01 = 0x28F
2073 	 */
2074 	params.min_abm_backlight = 0x28F;
2075 	/* In the case where ABM is implemented on DMCUB,
2076 	 * the dmcu object will be NULL.
2077 	 * ABM 2.4 and up are implemented on DMCUB.
2078 	 */
2079 	if (dmcu) {
2080 		if (!dmcu_load_iram(dmcu, params))
2081 			return -EINVAL;
2082 	} else if (adev->dm.dc->ctx->dmub_srv) {
2083 		struct dc_link *edp_links[MAX_NUM_EDP];
2084 		int edp_num;
2085 
2086 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2087 		for (i = 0; i < edp_num; i++) {
2088 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2089 				return -EINVAL;
2090 		}
2091 	}
2092 
2093 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2094 }
2095 
2096 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2097 {
2098 	struct amdgpu_dm_connector *aconnector;
2099 	struct drm_connector *connector;
2100 	struct drm_connector_list_iter iter;
2101 	struct drm_dp_mst_topology_mgr *mgr;
2102 	int ret;
2103 	bool need_hotplug = false;
2104 
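	/*
	 * Suspend or resume the MST topology manager on every MST root
	 * connector. If a manager fails to resume, drop MST on that link and
	 * send a hotplug event so userspace re-probes the topology.
	 */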
2105 	drm_connector_list_iter_begin(dev, &iter);
2106 	drm_for_each_connector_iter(connector, &iter) {
2107 		aconnector = to_amdgpu_dm_connector(connector);
2108 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2109 		    aconnector->mst_port)
2110 			continue;
2111 
2112 		mgr = &aconnector->mst_mgr;
2113 
2114 		if (suspend) {
2115 			drm_dp_mst_topology_mgr_suspend(mgr);
2116 		} else {
2117 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2118 			if (ret < 0) {
2119 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2120 				need_hotplug = true;
2121 			}
2122 		}
2123 	}
2124 	drm_connector_list_iter_end(&iter);
2125 
2126 	if (need_hotplug)
2127 		drm_kms_helper_hotplug_event(dev);
2128 }
2129 
2130 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2131 {
2132 	struct smu_context *smu = &adev->smu;
2133 	int ret = 0;
2134 
2135 	if (!is_support_sw_smu(adev))
2136 		return 0;
2137 
2138 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2139 	 * depends on the Windows driver dc implementation.
2140 	 * For Navi1x, the clock settings of the dcn watermarks are fixed; the
2141 	 * settings should be passed to smu during boot up and on resume from s3.
2142 	 * Boot up: dc calculates the dcn watermark clock settings within
2143 	 * dc_create, dcn20_resource_construct,
2144 	 * then calls the pplib functions below to pass the settings to smu:
2145 	 * smu_set_watermarks_for_clock_ranges
2146 	 * smu_set_watermarks_table
2147 	 * navi10_set_watermarks_table
2148 	 * smu_write_watermarks_table
2149 	 *
2150 	 * For Renoir, the clock settings of the dcn watermarks are also fixed.
2151 	 * dc implements a different flow for the Windows driver:
2152 	 * dc_hardware_init / dc_set_power_state
2153 	 * dcn10_init_hw
2154 	 * notify_wm_ranges
2155 	 * set_wm_ranges
2156 	 * -- Linux
2157 	 * smu_set_watermarks_for_clock_ranges
2158 	 * renoir_set_watermarks_table
2159 	 * smu_write_watermarks_table
2160 	 *
2161 	 * For Linux,
2162 	 * dc_hardware_init -> amdgpu_dm_init
2163 	 * dc_set_power_state --> dm_resume
2164 	 *
2165 	 * Therefore, this function applies to navi10/12/14 but not Renoir.
2167 	 */
2168 	switch (adev->ip_versions[DCE_HWIP][0]) {
2169 	case IP_VERSION(2, 0, 2):
2170 	case IP_VERSION(2, 0, 0):
2171 		break;
2172 	default:
2173 		return 0;
2174 	}
2175 
2176 	ret = smu_write_watermarks_table(smu);
2177 	if (ret) {
2178 		DRM_ERROR("Failed to update WMTABLE!\n");
2179 		return ret;
2180 	}
2181 
2182 	return 0;
2183 }
2184 
2185 /**
2186  * dm_hw_init() - Initialize DC device
2187  * @handle: The base driver device containing the amdgpu_dm device.
2188  *
2189  * Initialize the &struct amdgpu_display_manager device. This involves calling
2190  * the initializers of each DM component, then populating the struct with them.
2191  *
2192  * Although the function implies hardware initialization, both hardware and
2193  * software are initialized here. Splitting them out to their relevant init
2194  * hooks is a future TODO item.
2195  *
2196  * Some notable things that are initialized here:
2197  *
2198  * - Display Core, both software and hardware
2199  * - DC modules that we need (freesync and color management)
2200  * - DRM software states
2201  * - Interrupt sources and handlers
2202  * - Vblank support
2203  * - Debug FS entries, if enabled
2204  */
2205 static int dm_hw_init(void *handle)
2206 {
2207 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2208 	/* Create DAL display manager */
2209 	amdgpu_dm_init(adev);
2210 	amdgpu_dm_hpd_init(adev);
2211 
2212 	return 0;
2213 }
2214 
2215 /**
2216  * dm_hw_fini() - Teardown DC device
2217  * @handle: The base driver device containing the amdgpu_dm device.
2218  *
2219  * Teardown components within &struct amdgpu_display_manager that require
2220  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2221  * were loaded. Also flush IRQ workqueues and disable them.
2222  */
2223 static int dm_hw_fini(void *handle)
2224 {
2225 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2226 
2227 	amdgpu_dm_hpd_fini(adev);
2228 
2229 	amdgpu_dm_irq_fini(adev);
2230 	amdgpu_dm_fini(adev);
2231 	return 0;
2232 }
2233 
2234 
2235 static int dm_enable_vblank(struct drm_crtc *crtc);
2236 static void dm_disable_vblank(struct drm_crtc *crtc);
2237 
2238 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2239 				 struct dc_state *state, bool enable)
2240 {
2241 	enum dc_irq_source irq_source;
2242 	struct amdgpu_crtc *acrtc;
2243 	int rc = -EBUSY;
2244 	int i = 0;
2245 
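	/*
	 * For every committed stream that still has planes, toggle its
	 * pageflip interrupt and enable/disable vblank support together.
	 */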
2246 	for (i = 0; i < state->stream_count; i++) {
2247 		acrtc = get_crtc_by_otg_inst(
2248 				adev, state->stream_status[i].primary_otg_inst);
2249 
2250 		if (acrtc && state->stream_status[i].plane_count != 0) {
2251 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2252 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2253 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2254 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2255 			if (rc)
2256 				DRM_WARN("Failed to %s pflip interrupts\n",
2257 					 enable ? "enable" : "disable");
2258 
2259 			if (enable) {
2260 				rc = dm_enable_vblank(&acrtc->base);
2261 				if (rc)
2262 					DRM_WARN("Failed to enable vblank interrupts\n");
2263 			} else {
2264 				dm_disable_vblank(&acrtc->base);
2265 			}
2266 
2267 		}
2268 	}
2269 
2270 }
2271 
2272 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2273 {
2274 	struct dc_state *context = NULL;
2275 	enum dc_status res = DC_ERROR_UNEXPECTED;
2276 	int i;
2277 	struct dc_stream_state *del_streams[MAX_PIPES];
2278 	int del_streams_count = 0;
2279 
2280 	memset(del_streams, 0, sizeof(del_streams));
2281 
2282 	context = dc_create_state(dc);
2283 	if (context == NULL)
2284 		goto context_alloc_fail;
2285 
2286 	dc_resource_state_copy_construct_current(dc, context);
2287 
2288 	/* First remove from context all streams */
2289 	for (i = 0; i < context->stream_count; i++) {
2290 		struct dc_stream_state *stream = context->streams[i];
2291 
2292 		del_streams[del_streams_count++] = stream;
2293 	}
2294 
2295 	/* Remove all planes for removed streams and then remove the streams */
2296 	for (i = 0; i < del_streams_count; i++) {
2297 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2298 			res = DC_FAIL_DETACH_SURFACES;
2299 			goto fail;
2300 		}
2301 
2302 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2303 		if (res != DC_OK)
2304 			goto fail;
2305 	}
2306 
2307 
2308 	res = dc_validate_global_state(dc, context, false);
2309 
2310 	if (res != DC_OK) {
2311 		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
2312 		goto fail;
2313 	}
2314 
2315 	res = dc_commit_state(dc, context);
2316 
2317 fail:
2318 	dc_release_state(context);
2319 
2320 context_alloc_fail:
2321 	return res;
2322 }
2323 
2324 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2325 {
2326 	int i;
2327 
2328 	if (dm->hpd_rx_offload_wq) {
2329 		for (i = 0; i < dm->dc->caps.max_links; i++)
2330 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2331 	}
2332 }
2333 
2334 static int dm_suspend(void *handle)
2335 {
2336 	struct amdgpu_device *adev = handle;
2337 	struct amdgpu_display_manager *dm = &adev->dm;
2338 	int ret = 0;
2339 
2340 	if (amdgpu_in_reset(adev)) {
2341 		mutex_lock(&dm->dc_lock);
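		/* dc_lock stays held across the GPU reset; dm_resume() releases it. */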
2342 
2343 #if defined(CONFIG_DRM_AMD_DC_DCN)
2344 		dc_allow_idle_optimizations(adev->dm.dc, false);
2345 #endif
2346 
2347 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2348 
2349 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2350 
2351 		amdgpu_dm_commit_zero_streams(dm->dc);
2352 
2353 		amdgpu_dm_irq_suspend(adev);
2354 
2355 		hpd_rx_irq_work_suspend(dm);
2356 
2357 		return ret;
2358 	}
2359 
2360 	WARN_ON(adev->dm.cached_state);
2361 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2362 
2363 	s3_handle_mst(adev_to_drm(adev), true);
2364 
2365 	amdgpu_dm_irq_suspend(adev);
2366 
2367 	hpd_rx_irq_work_suspend(dm);
2368 
2369 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2370 
2371 	return 0;
2372 }
2373 
2374 static struct amdgpu_dm_connector *
2375 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2376 					     struct drm_crtc *crtc)
2377 {
2378 	uint32_t i;
2379 	struct drm_connector_state *new_con_state;
2380 	struct drm_connector *connector;
2381 	struct drm_crtc *crtc_from_state;
2382 
2383 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2384 		crtc_from_state = new_con_state->crtc;
2385 
2386 		if (crtc_from_state == crtc)
2387 			return to_amdgpu_dm_connector(connector);
2388 	}
2389 
2390 	return NULL;
2391 }
2392 
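/*
 * Emulate link detection for connectors that are forced on by the user but
 * report no physical sink: create a dc_sink matching the connector's signal
 * type and attempt to read its EDID through the DM helpers.
 */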
2393 static void emulated_link_detect(struct dc_link *link)
2394 {
2395 	struct dc_sink_init_data sink_init_data = { 0 };
2396 	struct display_sink_capability sink_caps = { 0 };
2397 	enum dc_edid_status edid_status;
2398 	struct dc_context *dc_ctx = link->ctx;
2399 	struct dc_sink *sink = NULL;
2400 	struct dc_sink *prev_sink = NULL;
2401 
2402 	link->type = dc_connection_none;
2403 	prev_sink = link->local_sink;
2404 
2405 	if (prev_sink)
2406 		dc_sink_release(prev_sink);
2407 
2408 	switch (link->connector_signal) {
2409 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2410 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2411 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2412 		break;
2413 	}
2414 
2415 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2416 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2417 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2418 		break;
2419 	}
2420 
2421 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2422 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2423 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2424 		break;
2425 	}
2426 
2427 	case SIGNAL_TYPE_LVDS: {
2428 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2429 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2430 		break;
2431 	}
2432 
2433 	case SIGNAL_TYPE_EDP: {
2434 		sink_caps.transaction_type =
2435 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2436 		sink_caps.signal = SIGNAL_TYPE_EDP;
2437 		break;
2438 	}
2439 
2440 	case SIGNAL_TYPE_DISPLAY_PORT: {
2441 		sink_caps.transaction_type =
2442 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2443 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2444 		break;
2445 	}
2446 
2447 	default:
2448 		DC_ERROR("Invalid connector type! signal:%d\n",
2449 			link->connector_signal);
2450 		return;
2451 	}
2452 
2453 	sink_init_data.link = link;
2454 	sink_init_data.sink_signal = sink_caps.signal;
2455 
2456 	sink = dc_sink_create(&sink_init_data);
2457 	if (!sink) {
2458 		DC_ERROR("Failed to create sink!\n");
2459 		return;
2460 	}
2461 
2462 	/* dc_sink_create returns a new reference */
2463 	link->local_sink = sink;
2464 
2465 	edid_status = dm_helpers_read_local_edid(
2466 			link->ctx,
2467 			link,
2468 			sink);
2469 
2470 	if (edid_status != EDID_OK)
2471 		DC_ERROR("Failed to read EDID");
2472 
2473 }
2474 
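/*
 * After a GPU reset, re-commit every stream from the cached DC state with a
 * full surface update for each plane so DC fully reprograms the hardware.
 */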
2475 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2476 				     struct amdgpu_display_manager *dm)
2477 {
2478 	struct {
2479 		struct dc_surface_update surface_updates[MAX_SURFACES];
2480 		struct dc_plane_info plane_infos[MAX_SURFACES];
2481 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2482 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2483 		struct dc_stream_update stream_update;
2484 	} *bundle;
2485 	int k, m;
2486 
2487 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2488 
2489 	if (!bundle) {
2490 		dm_error("Failed to allocate update bundle\n");
2491 		goto cleanup;
2492 	}
2493 
2494 	for (k = 0; k < dc_state->stream_count; k++) {
2495 		bundle->stream_update.stream = dc_state->streams[k];
2496 
2497 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2498 			bundle->surface_updates[m].surface =
2499 				dc_state->stream_status->plane_states[m];
2500 			bundle->surface_updates[m].surface->force_full_update =
2501 				true;
2502 		}
2503 		dc_commit_updates_for_stream(
2504 			dm->dc, bundle->surface_updates,
2505 			dc_state->stream_status->plane_count,
2506 			dc_state->streams[k], &bundle->stream_update, dc_state);
2507 	}
2508 
2509 cleanup:
2510 	kfree(bundle);
2511 
2512 	return;
2513 }
2514 
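/*
 * Force the stream currently driving the given link to DPMS off by sending a
 * direct dpms_off stream update to DC, outside of the regular atomic path.
 */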
2515 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2516 {
2517 	struct dc_stream_state *stream_state;
2518 	struct amdgpu_dm_connector *aconnector = link->priv;
2519 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2520 	struct dc_stream_update stream_update;
2521 	bool dpms_off = true;
2522 
2523 	memset(&stream_update, 0, sizeof(stream_update));
2524 	stream_update.dpms_off = &dpms_off;
2525 
2526 	mutex_lock(&adev->dm.dc_lock);
2527 	stream_state = dc_stream_find_from_link(link);
2528 
2529 	if (stream_state == NULL) {
2530 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2531 		mutex_unlock(&adev->dm.dc_lock);
2532 		return;
2533 	}
2534 
2535 	stream_update.stream = stream_state;
2536 	acrtc_state->force_dpms_off = true;
2537 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2538 				     stream_state, &stream_update,
2539 				     stream_state->ctx->dc->current_state);
2540 	mutex_unlock(&adev->dm.dc_lock);
2541 }
2542 
2543 static int dm_resume(void *handle)
2544 {
2545 	struct amdgpu_device *adev = handle;
2546 	struct drm_device *ddev = adev_to_drm(adev);
2547 	struct amdgpu_display_manager *dm = &adev->dm;
2548 	struct amdgpu_dm_connector *aconnector;
2549 	struct drm_connector *connector;
2550 	struct drm_connector_list_iter iter;
2551 	struct drm_crtc *crtc;
2552 	struct drm_crtc_state *new_crtc_state;
2553 	struct dm_crtc_state *dm_new_crtc_state;
2554 	struct drm_plane *plane;
2555 	struct drm_plane_state *new_plane_state;
2556 	struct dm_plane_state *dm_new_plane_state;
2557 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2558 	enum dc_connection_type new_connection_type = dc_connection_none;
2559 	struct dc_state *dc_state;
2560 	int i, r, j;
2561 
2562 	if (amdgpu_in_reset(adev)) {
2563 		dc_state = dm->cached_dc_state;
2564 
2565 		/*
2566 		 * The dc->current_state is backed up into dm->cached_dc_state
2567 		 * before we commit 0 streams.
2568 		 *
2569 		 * DC will clear link encoder assignments on the real state
2570 		 * but the changes won't propagate over to the copy we made
2571 		 * before the 0 streams commit.
2572 		 *
2573 		 * DC expects that link encoder assignments are *not* valid
2574 		 * when committing a state, so as a workaround it needs to be
2575 		 * cleared here.
2576 		 */
2577 		link_enc_cfg_init(dm->dc, dc_state);
2578 
2579 		if (dc_enable_dmub_notifications(adev->dm.dc))
2580 			amdgpu_dm_outbox_init(adev);
2581 
2582 		r = dm_dmub_hw_init(adev);
2583 		if (r)
2584 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2585 
2586 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2587 		dc_resume(dm->dc);
2588 
2589 		amdgpu_dm_irq_resume_early(adev);
2590 
2591 		for (i = 0; i < dc_state->stream_count; i++) {
2592 			dc_state->streams[i]->mode_changed = true;
2593 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2594 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2595 					= 0xffffffff;
2596 			}
2597 		}
2598 #if defined(CONFIG_DRM_AMD_DC_DCN)
2599 		/*
2600 		 * Resource allocation happens for link encoders for newer ASICs in
2601 		 * dc_validate_global_state, so we need to revalidate it.
2602 		 *
2603 		 * This shouldn't fail (it passed once before), so warn if it does.
2604 		 */
2605 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2606 #endif
2607 
2608 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2609 
2610 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2611 
2612 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2613 
2614 		dc_release_state(dm->cached_dc_state);
2615 		dm->cached_dc_state = NULL;
2616 
2617 		amdgpu_dm_irq_resume_late(adev);
2618 
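		/* Release dc_lock, taken in dm_suspend() for the GPU reset path. */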
2619 		mutex_unlock(&dm->dc_lock);
2620 
2621 		return 0;
2622 	}
2623 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2624 	dc_release_state(dm_state->context);
2625 	dm_state->context = dc_create_state(dm->dc);
2626 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2627 	dc_resource_state_construct(dm->dc, dm_state->context);
2628 
2629 	/* Re-enable outbox interrupts for DPIA. */
2630 	if (dc_enable_dmub_notifications(adev->dm.dc))
2631 		amdgpu_dm_outbox_init(adev);
2632 
2633 	/* Before powering on DC we need to re-initialize DMUB. */
2634 	r = dm_dmub_hw_init(adev);
2635 	if (r)
2636 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2637 
2638 	/* power on hardware */
2639 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2640 
2641 	/* program HPD filter */
2642 	dc_resume(dm->dc);
2643 
2644 	/*
2645 	 * Enable HPD Rx IRQ early; this should be done before the mode set,
2646 	 * since short pulse interrupts are used for MST.
2647 	 */
2648 	amdgpu_dm_irq_resume_early(adev);
2649 
2650 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2651 	s3_handle_mst(ddev, false);
2652 
2653 	/* Do detection */
2654 	drm_connector_list_iter_begin(ddev, &iter);
2655 	drm_for_each_connector_iter(connector, &iter) {
2656 		aconnector = to_amdgpu_dm_connector(connector);
2657 
2658 		/*
2659 		 * This is the case when traversing through already created
2660 		 * MST connectors; they should be skipped.
2661 		 */
2662 		if (aconnector->mst_port)
2663 			continue;
2664 
2665 		mutex_lock(&aconnector->hpd_lock);
2666 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2667 			DRM_ERROR("KMS: Failed to detect connector\n");
2668 
2669 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2670 			emulated_link_detect(aconnector->dc_link);
2671 		else
2672 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2673 
2674 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2675 			aconnector->fake_enable = false;
2676 
2677 		if (aconnector->dc_sink)
2678 			dc_sink_release(aconnector->dc_sink);
2679 		aconnector->dc_sink = NULL;
2680 		amdgpu_dm_update_connector_after_detect(aconnector);
2681 		mutex_unlock(&aconnector->hpd_lock);
2682 	}
2683 	drm_connector_list_iter_end(&iter);
2684 
2685 	/* Force mode set in atomic commit */
2686 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2687 		new_crtc_state->active_changed = true;
2688 
2689 	/*
2690 	 * atomic_check is expected to create the dc states. We need to release
2691 	 * them here, since they were duplicated as part of the suspend
2692 	 * procedure.
2693 	 */
2694 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2695 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2696 		if (dm_new_crtc_state->stream) {
2697 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2698 			dc_stream_release(dm_new_crtc_state->stream);
2699 			dm_new_crtc_state->stream = NULL;
2700 		}
2701 	}
2702 
2703 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2704 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2705 		if (dm_new_plane_state->dc_state) {
2706 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2707 			dc_plane_state_release(dm_new_plane_state->dc_state);
2708 			dm_new_plane_state->dc_state = NULL;
2709 		}
2710 	}
2711 
2712 	drm_atomic_helper_resume(ddev, dm->cached_state);
2713 
2714 	dm->cached_state = NULL;
2715 
2716 	amdgpu_dm_irq_resume_late(adev);
2717 
2718 	amdgpu_dm_smu_write_watermarks_table(adev);
2719 
2720 	return 0;
2721 }
2722 
2723 /**
2724  * DOC: DM Lifecycle
2725  *
2726  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2727  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2728  * the base driver's device list to be initialized and torn down accordingly.
2729  *
2730  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2731  */
2732 
2733 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2734 	.name = "dm",
2735 	.early_init = dm_early_init,
2736 	.late_init = dm_late_init,
2737 	.sw_init = dm_sw_init,
2738 	.sw_fini = dm_sw_fini,
2739 	.early_fini = amdgpu_dm_early_fini,
2740 	.hw_init = dm_hw_init,
2741 	.hw_fini = dm_hw_fini,
2742 	.suspend = dm_suspend,
2743 	.resume = dm_resume,
2744 	.is_idle = dm_is_idle,
2745 	.wait_for_idle = dm_wait_for_idle,
2746 	.check_soft_reset = dm_check_soft_reset,
2747 	.soft_reset = dm_soft_reset,
2748 	.set_clockgating_state = dm_set_clockgating_state,
2749 	.set_powergating_state = dm_set_powergating_state,
2750 };
2751 
2752 const struct amdgpu_ip_block_version dm_ip_block =
2753 {
2754 	.type = AMD_IP_BLOCK_TYPE_DCE,
2755 	.major = 1,
2756 	.minor = 0,
2757 	.rev = 0,
2758 	.funcs = &amdgpu_dm_funcs,
2759 };
2760 
2761 
2762 /**
2763  * DOC: atomic
2764  *
2765  * *WIP*
2766  */
2767 
2768 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2769 	.fb_create = amdgpu_display_user_framebuffer_create,
2770 	.get_format_info = amd_get_format_info,
2771 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2772 	.atomic_check = amdgpu_dm_atomic_check,
2773 	.atomic_commit = drm_atomic_helper_commit,
2774 };
2775 
2776 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2777 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2778 };
2779 
2780 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2781 {
2782 	u32 max_cll, min_cll, max, min, q, r;
2783 	struct amdgpu_dm_backlight_caps *caps;
2784 	struct amdgpu_display_manager *dm;
2785 	struct drm_connector *conn_base;
2786 	struct amdgpu_device *adev;
2787 	struct dc_link *link = NULL;
2788 	static const u8 pre_computed_values[] = {
2789 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2790 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2791 	int i;
2792 
2793 	if (!aconnector || !aconnector->dc_link)
2794 		return;
2795 
2796 	link = aconnector->dc_link;
2797 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2798 		return;
2799 
2800 	conn_base = &aconnector->base;
2801 	adev = drm_to_adev(conn_base->dev);
2802 	dm = &adev->dm;
2803 	for (i = 0; i < dm->num_of_edps; i++) {
2804 		if (link == dm->backlight_link[i])
2805 			break;
2806 	}
2807 	if (i >= dm->num_of_edps)
2808 		return;
2809 	caps = &dm->backlight_caps[i];
2810 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2811 	caps->aux_support = false;
2812 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2813 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2814 
2815 	if (caps->ext_caps->bits.oled == 1 /*||
2816 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2817 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2818 		caps->aux_support = true;
2819 
2820 	if (amdgpu_backlight == 0)
2821 		caps->aux_support = false;
2822 	else if (amdgpu_backlight == 1)
2823 		caps->aux_support = true;
2824 
2825 	/* From the specification (CTA-861-G), for calculating the maximum
2826 	 * luminance we need to use:
2827 	 *	Luminance = 50*2**(CV/32)
2828 	 * Where CV is a one-byte value.
2829 	 * Evaluating this expression may require floating-point precision;
2830 	 * to avoid that complexity, we take advantage of the fact that CV is
2831 	 * divided by a constant. From Euclid's division algorithm, we know
2832 	 * that CV can be written as CV = 32*q + r. Substituting CV in the
2833 	 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2834 	 * to pre-compute 50*2**(r/32) for each possible r. The values were
2835 	 * pre-computed with the following Ruby line:
2836 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2837 	 * and the results can be verified against the pre_computed_values
2838 	 * table above.
2839 	 */
2840 	q = max_cll >> 5;
2841 	r = max_cll % 32;
2842 	max = (1 << q) * pre_computed_values[r];
2843 
2844 	/* min luminance: maxLum * (CV/255)^2 / 100 */
2845 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2846 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2847 
2848 	caps->aux_max_input_signal = max;
2849 	caps->aux_min_input_signal = min;
2850 }
2851 
2852 void amdgpu_dm_update_connector_after_detect(
2853 		struct amdgpu_dm_connector *aconnector)
2854 {
2855 	struct drm_connector *connector = &aconnector->base;
2856 	struct drm_device *dev = connector->dev;
2857 	struct dc_sink *sink;
2858 
2859 	/* MST handled by drm_mst framework */
2860 	if (aconnector->mst_mgr.mst_state)
2861 		return;
2862 
2863 	sink = aconnector->dc_link->local_sink;
2864 	if (sink)
2865 		dc_sink_retain(sink);
2866 
2867 	/*
2868 	 * EDID mgmt connectors get their first update only in the mode_valid hook; the
2869 	 * connector sink is then set to either the fake or the physical sink, depending on link status.
2870 	 * Skip if already done during boot.
2871 	 */
2872 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2873 			&& aconnector->dc_em_sink) {
2874 
2875 		/*
2876 		 * For headless S3 resume, use dc_em_sink to fake a stream,
2877 		 * because on resume connector->sink is set to NULL.
2878 		 */
2879 		mutex_lock(&dev->mode_config.mutex);
2880 
2881 		if (sink) {
2882 			if (aconnector->dc_sink) {
2883 				amdgpu_dm_update_freesync_caps(connector, NULL);
2884 				/*
2885 				 * The retain and release below bump up the sink's
2886 				 * refcount because the link doesn't point to it
2887 				 * anymore after disconnect; otherwise the next
2888 				 * crtc-to-connector reshuffle by the UMD would trigger an unwanted dc_sink release.
2889 				 */
2890 				dc_sink_release(aconnector->dc_sink);
2891 			}
2892 			aconnector->dc_sink = sink;
2893 			dc_sink_retain(aconnector->dc_sink);
2894 			amdgpu_dm_update_freesync_caps(connector,
2895 					aconnector->edid);
2896 		} else {
2897 			amdgpu_dm_update_freesync_caps(connector, NULL);
2898 			if (!aconnector->dc_sink) {
2899 				aconnector->dc_sink = aconnector->dc_em_sink;
2900 				dc_sink_retain(aconnector->dc_sink);
2901 			}
2902 		}
2903 
2904 		mutex_unlock(&dev->mode_config.mutex);
2905 
2906 		if (sink)
2907 			dc_sink_release(sink);
2908 		return;
2909 	}
2910 
2911 	/*
2912 	 * TODO: temporary guard until a proper fix is found.
2913 	 * If this sink is an MST sink, we should not do anything.
2914 	 */
2915 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2916 		dc_sink_release(sink);
2917 		return;
2918 	}
2919 
2920 	if (aconnector->dc_sink == sink) {
2921 		/*
2922 		 * We got a DP short pulse (Link Loss, DP CTS, etc.).
2923 		 * Do nothing!!
2924 		 */
2925 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2926 				aconnector->connector_id);
2927 		if (sink)
2928 			dc_sink_release(sink);
2929 		return;
2930 	}
2931 
2932 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2933 		aconnector->connector_id, aconnector->dc_sink, sink);
2934 
2935 	mutex_lock(&dev->mode_config.mutex);
2936 
2937 	/*
2938 	 * 1. Update status of the drm connector
2939 	 * 2. Send an event and let userspace tell us what to do
2940 	 */
2941 	if (sink) {
2942 		/*
2943 		 * TODO: check if we still need the S3 mode update workaround.
2944 		 * If yes, put it here.
2945 		 */
2946 		if (aconnector->dc_sink) {
2947 			amdgpu_dm_update_freesync_caps(connector, NULL);
2948 			dc_sink_release(aconnector->dc_sink);
2949 		}
2950 
2951 		aconnector->dc_sink = sink;
2952 		dc_sink_retain(aconnector->dc_sink);
2953 		if (sink->dc_edid.length == 0) {
2954 			aconnector->edid = NULL;
2955 			if (aconnector->dc_link->aux_mode) {
2956 				drm_dp_cec_unset_edid(
2957 					&aconnector->dm_dp_aux.aux);
2958 			}
2959 		} else {
2960 			aconnector->edid =
2961 				(struct edid *)sink->dc_edid.raw_edid;
2962 
2963 			drm_connector_update_edid_property(connector,
2964 							   aconnector->edid);
2965 			if (aconnector->dc_link->aux_mode)
2966 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2967 						    aconnector->edid);
2968 		}
2969 
2970 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2971 		update_connector_ext_caps(aconnector);
2972 	} else {
2973 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2974 		amdgpu_dm_update_freesync_caps(connector, NULL);
2975 		drm_connector_update_edid_property(connector, NULL);
2976 		aconnector->num_modes = 0;
2977 		dc_sink_release(aconnector->dc_sink);
2978 		aconnector->dc_sink = NULL;
2979 		aconnector->edid = NULL;
2980 #ifdef CONFIG_DRM_AMD_DC_HDCP
2981 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2982 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2983 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2984 #endif
2985 	}
2986 
2987 	mutex_unlock(&dev->mode_config.mutex);
2988 
2989 	update_subconnector_property(aconnector);
2990 
2991 	if (sink)
2992 		dc_sink_release(sink);
2993 }
2994 
2995 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2996 {
2997 	struct drm_connector *connector = &aconnector->base;
2998 	struct drm_device *dev = connector->dev;
2999 	enum dc_connection_type new_connection_type = dc_connection_none;
3000 	struct amdgpu_device *adev = drm_to_adev(dev);
3001 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3002 	struct dm_crtc_state *dm_crtc_state = NULL;
3003 
3004 	if (adev->dm.disable_hpd_irq)
3005 		return;
3006 
3007 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3008 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3009 					dm_con_state->base.state,
3010 					dm_con_state->base.crtc));
3011 	/*
3012 	 * In case of failure, or for MST, there is no need to update the connector
3013 	 * status or notify the OS, since (in the MST case) MST does this in its own context.
3014 	 */
3015 	mutex_lock(&aconnector->hpd_lock);
3016 
3017 #ifdef CONFIG_DRM_AMD_DC_HDCP
3018 	if (adev->dm.hdcp_workqueue) {
3019 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3020 		dm_con_state->update_hdcp = true;
3021 	}
3022 #endif
3023 	if (aconnector->fake_enable)
3024 		aconnector->fake_enable = false;
3025 
3026 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3027 		DRM_ERROR("KMS: Failed to detect connector\n");
3028 
3029 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3030 		emulated_link_detect(aconnector->dc_link);
3031 
3032 		drm_modeset_lock_all(dev);
3033 		dm_restore_drm_connector_state(dev, connector);
3034 		drm_modeset_unlock_all(dev);
3035 
3036 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3037 			drm_kms_helper_hotplug_event(dev);
3038 
3039 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3040 		if (new_connection_type == dc_connection_none &&
3041 		    aconnector->dc_link->type == dc_connection_none &&
3042 		    dm_crtc_state)
3043 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3044 
3045 		amdgpu_dm_update_connector_after_detect(aconnector);
3046 
3047 		drm_modeset_lock_all(dev);
3048 		dm_restore_drm_connector_state(dev, connector);
3049 		drm_modeset_unlock_all(dev);
3050 
3051 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3052 			drm_kms_helper_hotplug_event(dev);
3053 	}
3054 	mutex_unlock(&aconnector->hpd_lock);
3055 
3056 }
3057 
3058 static void handle_hpd_irq(void *param)
3059 {
3060 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3061 
3062 	handle_hpd_irq_helper(aconnector);
3063 
3064 }
3065 
3066 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3067 {
3068 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3069 	uint8_t dret;
3070 	bool new_irq_handled = false;
3071 	int dpcd_addr;
3072 	int dpcd_bytes_to_read;
3073 
3074 	const int max_process_count = 30;
3075 	int process_count = 0;
3076 
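	/*
	 * Read the sink's IRQ/ESI vector, let the MST manager handle any
	 * pending events, ACK them back over DPCD, and repeat until no new
	 * events remain or the retry cap is reached.
	 */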
3077 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3078 
3079 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3080 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3081 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3082 		dpcd_addr = DP_SINK_COUNT;
3083 	} else {
3084 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3085 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3086 		dpcd_addr = DP_SINK_COUNT_ESI;
3087 	}
3088 
3089 	dret = drm_dp_dpcd_read(
3090 		&aconnector->dm_dp_aux.aux,
3091 		dpcd_addr,
3092 		esi,
3093 		dpcd_bytes_to_read);
3094 
3095 	while (dret == dpcd_bytes_to_read &&
3096 		process_count < max_process_count) {
3097 		uint8_t retry;
3098 		dret = 0;
3099 
3100 		process_count++;
3101 
3102 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3103 		/* handle HPD short pulse irq */
3104 		if (aconnector->mst_mgr.mst_state)
3105 			drm_dp_mst_hpd_irq(
3106 				&aconnector->mst_mgr,
3107 				esi,
3108 				&new_irq_handled);
3109 
3110 		if (new_irq_handled) {
3111 			/* ACK at DPCD to notify downstream */
3112 			const int ack_dpcd_bytes_to_write =
3113 				dpcd_bytes_to_read - 1;
3114 
3115 			for (retry = 0; retry < 3; retry++) {
3116 				uint8_t wret;
3117 
3118 				wret = drm_dp_dpcd_write(
3119 					&aconnector->dm_dp_aux.aux,
3120 					dpcd_addr + 1,
3121 					&esi[1],
3122 					ack_dpcd_bytes_to_write);
3123 				if (wret == ack_dpcd_bytes_to_write)
3124 					break;
3125 			}
3126 
3127 			/* check if there is new irq to be handled */
3128 			dret = drm_dp_dpcd_read(
3129 				&aconnector->dm_dp_aux.aux,
3130 				dpcd_addr,
3131 				esi,
3132 				dpcd_bytes_to_read);
3133 
3134 			new_irq_handled = false;
3135 		} else {
3136 			break;
3137 		}
3138 	}
3139 
3140 	if (process_count == max_process_count)
3141 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3142 }
3143 
3144 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3145 							union hpd_irq_data hpd_irq_data)
3146 {
3147 	struct hpd_rx_irq_offload_work *offload_work =
3148 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3149 
3150 	if (!offload_work) {
3151 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3152 		return;
3153 	}
3154 
3155 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3156 	offload_work->data = hpd_irq_data;
3157 	offload_work->offload_wq = offload_wq;
3158 
3159 	queue_work(offload_wq->wq, &offload_work->work);
3160 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3161 }
3162 
3163 static void handle_hpd_rx_irq(void *param)
3164 {
3165 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3166 	struct drm_connector *connector = &aconnector->base;
3167 	struct drm_device *dev = connector->dev;
3168 	struct dc_link *dc_link = aconnector->dc_link;
3169 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3170 	bool result = false;
3171 	enum dc_connection_type new_connection_type = dc_connection_none;
3172 	struct amdgpu_device *adev = drm_to_adev(dev);
3173 	union hpd_irq_data hpd_irq_data;
3174 	bool link_loss = false;
3175 	bool has_left_work = false;
3176 	int idx = aconnector->base.index;
3177 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3178 
3179 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3180 
3181 	if (adev->dm.disable_hpd_irq)
3182 		return;
3183 
3184 	/*
3185 	 * TODO: Temporarily take this mutex so the HPD interrupt does not
3186 	 * run into a GPIO conflict; once an i2c helper is implemented, this
3187 	 * mutex should be retired.
3188 	 */
3189 	mutex_lock(&aconnector->hpd_lock);
3190 
3191 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3192 						&link_loss, true, &has_left_work);
3193 
3194 	if (!has_left_work)
3195 		goto out;
3196 
3197 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3198 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3199 		goto out;
3200 	}
3201 
3202 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3203 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3204 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3205 			dm_handle_mst_sideband_msg(aconnector);
3206 			goto out;
3207 		}
3208 
3209 		if (link_loss) {
3210 			bool skip = false;
3211 
3212 			spin_lock(&offload_wq->offload_lock);
3213 			skip = offload_wq->is_handling_link_loss;
3214 
3215 			if (!skip)
3216 				offload_wq->is_handling_link_loss = true;
3217 
3218 			spin_unlock(&offload_wq->offload_lock);
3219 
3220 			if (!skip)
3221 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3222 
3223 			goto out;
3224 		}
3225 	}
3226 
3227 out:
3228 	if (result && !is_mst_root_connector) {
3229 		/* Downstream Port status changed. */
3230 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3231 			DRM_ERROR("KMS: Failed to detect connector\n");
3232 
3233 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3234 			emulated_link_detect(dc_link);
3235 
3236 			if (aconnector->fake_enable)
3237 				aconnector->fake_enable = false;
3238 
3239 			amdgpu_dm_update_connector_after_detect(aconnector);
3240 
3241 
3242 			drm_modeset_lock_all(dev);
3243 			dm_restore_drm_connector_state(dev, connector);
3244 			drm_modeset_unlock_all(dev);
3245 
3246 			drm_kms_helper_hotplug_event(dev);
3247 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3248 
3249 			if (aconnector->fake_enable)
3250 				aconnector->fake_enable = false;
3251 
3252 			amdgpu_dm_update_connector_after_detect(aconnector);
3253 
3254 
3255 			drm_modeset_lock_all(dev);
3256 			dm_restore_drm_connector_state(dev, connector);
3257 			drm_modeset_unlock_all(dev);
3258 
3259 			drm_kms_helper_hotplug_event(dev);
3260 		}
3261 	}
3262 #ifdef CONFIG_DRM_AMD_DC_HDCP
3263 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3264 		if (adev->dm.hdcp_workqueue)
3265 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3266 	}
3267 #endif
3268 
3269 	if (dc_link->type != dc_connection_mst_branch)
3270 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3271 
3272 	mutex_unlock(&aconnector->hpd_lock);
3273 }
3274 
3275 static void register_hpd_handlers(struct amdgpu_device *adev)
3276 {
3277 	struct drm_device *dev = adev_to_drm(adev);
3278 	struct drm_connector *connector;
3279 	struct amdgpu_dm_connector *aconnector;
3280 	const struct dc_link *dc_link;
3281 	struct dc_interrupt_params int_params = {0};
3282 
3283 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3284 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3285 
3286 	list_for_each_entry(connector,
3287 			&dev->mode_config.connector_list, head) {
3288 
3289 		aconnector = to_amdgpu_dm_connector(connector);
3290 		dc_link = aconnector->dc_link;
3291 
3292 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3293 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3294 			int_params.irq_source = dc_link->irq_source_hpd;
3295 
3296 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3297 					handle_hpd_irq,
3298 					(void *) aconnector);
3299 		}
3300 
3301 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3302 
3303 			/* Also register for DP short pulse (hpd_rx). */
3304 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3305 			int_params.irq_source = dc_link->irq_source_hpd_rx;
3306 
3307 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3308 					handle_hpd_rx_irq,
3309 					(void *) aconnector);
3310 
3311 			if (adev->dm.hpd_rx_offload_wq)
3312 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3313 					aconnector;
3314 		}
3315 	}
3316 }
3317 
3318 #if defined(CONFIG_DRM_AMD_DC_SI)
3319 /* Register IRQ sources and initialize IRQ callbacks */
3320 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3321 {
3322 	struct dc *dc = adev->dm.dc;
3323 	struct common_irq_params *c_irq_params;
3324 	struct dc_interrupt_params int_params = {0};
3325 	int r;
3326 	int i;
3327 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3328 
3329 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3330 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3331 
3332 	/*
3333 	 * Actions of amdgpu_irq_add_id():
3334 	 * 1. Register a set() function with base driver.
3335 	 *    Base driver will call set() function to enable/disable an
3336 	 *    interrupt in DC hardware.
3337 	 * 2. Register amdgpu_dm_irq_handler().
3338 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3339 	 *    coming from DC hardware.
3340 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3341 	 *    for acknowledging and handling. */
3342 
3343 	/* Use VBLANK interrupt */
3344 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3345 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3346 		if (r) {
3347 			DRM_ERROR("Failed to add crtc irq id!\n");
3348 			return r;
3349 		}
3350 
3351 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3352 		int_params.irq_source =
3353 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3354 
3355 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3356 
3357 		c_irq_params->adev = adev;
3358 		c_irq_params->irq_src = int_params.irq_source;
3359 
3360 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3361 				dm_crtc_high_irq, c_irq_params);
3362 	}
3363 
3364 	/* Use GRPH_PFLIP interrupt */
3365 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3366 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3367 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3368 		if (r) {
3369 			DRM_ERROR("Failed to add page flip irq id!\n");
3370 			return r;
3371 		}
3372 
3373 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3374 		int_params.irq_source =
3375 			dc_interrupt_to_irq_source(dc, i, 0);
3376 
3377 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3378 
3379 		c_irq_params->adev = adev;
3380 		c_irq_params->irq_src = int_params.irq_source;
3381 
3382 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3383 				dm_pflip_high_irq, c_irq_params);
3384 
3385 	}
3386 
3387 	/* HPD */
3388 	r = amdgpu_irq_add_id(adev, client_id,
3389 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3390 	if (r) {
3391 		DRM_ERROR("Failed to add hpd irq id!\n");
3392 		return r;
3393 	}
3394 
3395 	register_hpd_handlers(adev);
3396 
3397 	return 0;
3398 }
3399 #endif
3400 
3401 /* Register IRQ sources and initialize IRQ callbacks */
3402 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3403 {
3404 	struct dc *dc = adev->dm.dc;
3405 	struct common_irq_params *c_irq_params;
3406 	struct dc_interrupt_params int_params = {0};
3407 	int r;
3408 	int i;
3409 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3410 
3411 	if (adev->family >= AMDGPU_FAMILY_AI)
3412 		client_id = SOC15_IH_CLIENTID_DCE;
3413 
3414 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3415 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3416 
3417 	/*
3418 	 * Actions of amdgpu_irq_add_id():
3419 	 * 1. Register a set() function with base driver.
3420 	 *    Base driver will call set() function to enable/disable an
3421 	 *    interrupt in DC hardware.
3422 	 * 2. Register amdgpu_dm_irq_handler().
3423 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3424 	 *    coming from DC hardware.
3425 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3426 	 *    for acknowledging and handling. */
3427 
3428 	/* Use VBLANK interrupt */
3429 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3430 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3431 		if (r) {
3432 			DRM_ERROR("Failed to add crtc irq id!\n");
3433 			return r;
3434 		}
3435 
3436 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3437 		int_params.irq_source =
3438 			dc_interrupt_to_irq_source(dc, i, 0);
3439 
3440 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3441 
3442 		c_irq_params->adev = adev;
3443 		c_irq_params->irq_src = int_params.irq_source;
3444 
3445 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3446 				dm_crtc_high_irq, c_irq_params);
3447 	}
3448 
3449 	/* Use VUPDATE interrupt */
3450 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3451 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3452 		if (r) {
3453 			DRM_ERROR("Failed to add vupdate irq id!\n");
3454 			return r;
3455 		}
3456 
3457 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3458 		int_params.irq_source =
3459 			dc_interrupt_to_irq_source(dc, i, 0);
3460 
3461 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3462 
3463 		c_irq_params->adev = adev;
3464 		c_irq_params->irq_src = int_params.irq_source;
3465 
3466 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3467 				dm_vupdate_high_irq, c_irq_params);
3468 	}
3469 
3470 	/* Use GRPH_PFLIP interrupt */
3471 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3472 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3473 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3474 		if (r) {
3475 			DRM_ERROR("Failed to add page flip irq id!\n");
3476 			return r;
3477 		}
3478 
3479 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3480 		int_params.irq_source =
3481 			dc_interrupt_to_irq_source(dc, i, 0);
3482 
3483 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3484 
3485 		c_irq_params->adev = adev;
3486 		c_irq_params->irq_src = int_params.irq_source;
3487 
3488 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3489 				dm_pflip_high_irq, c_irq_params);
3490 
3491 	}
3492 
3493 	/* HPD */
3494 	r = amdgpu_irq_add_id(adev, client_id,
3495 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3496 	if (r) {
3497 		DRM_ERROR("Failed to add hpd irq id!\n");
3498 		return r;
3499 	}
3500 
3501 	register_hpd_handlers(adev);
3502 
3503 	return 0;
3504 }
3505 
3506 #if defined(CONFIG_DRM_AMD_DC_DCN)
3507 /* Register IRQ sources and initialize IRQ callbacks */
3508 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3509 {
3510 	struct dc *dc = adev->dm.dc;
3511 	struct common_irq_params *c_irq_params;
3512 	struct dc_interrupt_params int_params = {0};
3513 	int r;
3514 	int i;
3515 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3516 	static const unsigned int vrtl_int_srcid[] = {
3517 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3518 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3519 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3520 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3521 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3522 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3523 	};
3524 #endif
3525 
3526 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3527 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3528 
3529 	/*
3530 	 * Actions of amdgpu_irq_add_id():
3531 	 * 1. Register a set() function with base driver.
3532 	 *    Base driver will call set() function to enable/disable an
3533 	 *    interrupt in DC hardware.
3534 	 * 2. Register amdgpu_dm_irq_handler().
3535 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3536 	 *    coming from DC hardware.
3537 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3538 	 *    for acknowledging and handling.
3539 	 */
3540 
3541 	/* Use VSTARTUP interrupt */
3542 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3543 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3544 			i++) {
3545 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3546 
3547 		if (r) {
3548 			DRM_ERROR("Failed to add crtc irq id!\n");
3549 			return r;
3550 		}
3551 
3552 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3553 		int_params.irq_source =
3554 			dc_interrupt_to_irq_source(dc, i, 0);
3555 
3556 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3557 
3558 		c_irq_params->adev = adev;
3559 		c_irq_params->irq_src = int_params.irq_source;
3560 
3561 		amdgpu_dm_irq_register_interrupt(
3562 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3563 	}
3564 
3565 	/* Use otg vertical line interrupt */
3566 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3567 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3568 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3569 				vrtl_int_srcid[i], &adev->vline0_irq);
3570 
3571 		if (r) {
3572 			DRM_ERROR("Failed to add vline0 irq id!\n");
3573 			return r;
3574 		}
3575 
3576 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3577 		int_params.irq_source =
3578 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3579 
3580 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3581 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3582 			break;
3583 		}
3584 
3585 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3586 					- DC_IRQ_SOURCE_DC1_VLINE0];
3587 
3588 		c_irq_params->adev = adev;
3589 		c_irq_params->irq_src = int_params.irq_source;
3590 
3591 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3592 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3593 	}
3594 #endif
3595 
3596 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3597 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3598 	 * to trigger at the end of each vblank, regardless of the state of the lock,
3599 	 * matching DCE behaviour.
3600 	 */
3601 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3602 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3603 	     i++) {
3604 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3605 
3606 		if (r) {
3607 			DRM_ERROR("Failed to add vupdate irq id!\n");
3608 			return r;
3609 		}
3610 
3611 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3612 		int_params.irq_source =
3613 			dc_interrupt_to_irq_source(dc, i, 0);
3614 
3615 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3616 
3617 		c_irq_params->adev = adev;
3618 		c_irq_params->irq_src = int_params.irq_source;
3619 
3620 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3621 				dm_vupdate_high_irq, c_irq_params);
3622 	}
3623 
3624 	/* Use GRPH_PFLIP interrupt */
3625 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3626 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3627 			i++) {
3628 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3629 		if (r) {
3630 			DRM_ERROR("Failed to add page flip irq id!\n");
3631 			return r;
3632 		}
3633 
3634 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3635 		int_params.irq_source =
3636 			dc_interrupt_to_irq_source(dc, i, 0);
3637 
3638 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3639 
3640 		c_irq_params->adev = adev;
3641 		c_irq_params->irq_src = int_params.irq_source;
3642 
3643 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3644 				dm_pflip_high_irq, c_irq_params);
3645 
3646 	}
3647 
3648 	/* HPD */
3649 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3650 			&adev->hpd_irq);
3651 	if (r) {
3652 		DRM_ERROR("Failed to add hpd irq id!\n");
3653 		return r;
3654 	}
3655 
3656 	register_hpd_handlers(adev);
3657 
3658 	return 0;
3659 }
3660 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3661 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3662 {
3663 	struct dc *dc = adev->dm.dc;
3664 	struct common_irq_params *c_irq_params;
3665 	struct dc_interrupt_params int_params = {0};
3666 	int r, i;
3667 
3668 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3669 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3670 
3671 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3672 			&adev->dmub_outbox_irq);
3673 	if (r) {
3674 		DRM_ERROR("Failed to add outbox irq id!\n");
3675 		return r;
3676 	}
3677 
3678 	if (dc->ctx->dmub_srv) {
3679 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3680 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3681 		int_params.irq_source =
3682 		dc_interrupt_to_irq_source(dc, i, 0);
3683 
3684 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3685 
3686 		c_irq_params->adev = adev;
3687 		c_irq_params->irq_src = int_params.irq_source;
3688 
3689 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3690 				dm_dmub_outbox1_low_irq, c_irq_params);
3691 	}
3692 
3693 	return 0;
3694 }
3695 #endif
3696 
3697 /*
3698  * Acquires the lock for the atomic state object and returns
3699  * the new atomic state.
3700  *
3701  * This should only be called during atomic check.
3702  */
3703 static int dm_atomic_get_state(struct drm_atomic_state *state,
3704 			       struct dm_atomic_state **dm_state)
3705 {
3706 	struct drm_device *dev = state->dev;
3707 	struct amdgpu_device *adev = drm_to_adev(dev);
3708 	struct amdgpu_display_manager *dm = &adev->dm;
3709 	struct drm_private_state *priv_state;
3710 
3711 	if (*dm_state)
3712 		return 0;
3713 
3714 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3715 	if (IS_ERR(priv_state))
3716 		return PTR_ERR(priv_state);
3717 
3718 	*dm_state = to_dm_atomic_state(priv_state);
3719 
3720 	return 0;
3721 }
3722 
3723 static struct dm_atomic_state *
3724 dm_atomic_get_new_state(struct drm_atomic_state *state)
3725 {
3726 	struct drm_device *dev = state->dev;
3727 	struct amdgpu_device *adev = drm_to_adev(dev);
3728 	struct amdgpu_display_manager *dm = &adev->dm;
3729 	struct drm_private_obj *obj;
3730 	struct drm_private_state *new_obj_state;
3731 	int i;
3732 
3733 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3734 		if (obj->funcs == dm->atomic_obj.funcs)
3735 			return to_dm_atomic_state(new_obj_state);
3736 	}
3737 
3738 	return NULL;
3739 }
3740 
3741 static struct drm_private_state *
3742 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3743 {
3744 	struct dm_atomic_state *old_state, *new_state;
3745 
3746 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3747 	if (!new_state)
3748 		return NULL;
3749 
3750 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3751 
3752 	old_state = to_dm_atomic_state(obj->state);
3753 
3754 	if (old_state && old_state->context)
3755 		new_state->context = dc_copy_state(old_state->context);
3756 
3757 	if (!new_state->context) {
3758 		kfree(new_state);
3759 		return NULL;
3760 	}
3761 
3762 	return &new_state->base;
3763 }
3764 
3765 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3766 				    struct drm_private_state *state)
3767 {
3768 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3769 
3770 	if (dm_state && dm_state->context)
3771 		dc_release_state(dm_state->context);
3772 
3773 	kfree(dm_state);
3774 }
3775 
3776 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3777 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3778 	.atomic_destroy_state = dm_atomic_destroy_state,
3779 };
3780 
3781 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3782 {
3783 	struct dm_atomic_state *state;
3784 	int r;
3785 
3786 	adev->mode_info.mode_config_initialized = true;
3787 
3788 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3789 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3790 
3791 	adev_to_drm(adev)->mode_config.max_width = 16384;
3792 	adev_to_drm(adev)->mode_config.max_height = 16384;
3793 
3794 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3795 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3796 	/* indicates support for immediate flip */
3797 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3798 
3799 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3800 
3801 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3802 	if (!state)
3803 		return -ENOMEM;
3804 
3805 	state->context = dc_create_state(adev->dm.dc);
3806 	if (!state->context) {
3807 		kfree(state);
3808 		return -ENOMEM;
3809 	}
3810 
3811 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3812 
3813 	drm_atomic_private_obj_init(adev_to_drm(adev),
3814 				    &adev->dm.atomic_obj,
3815 				    &state->base,
3816 				    &dm_atomic_state_funcs);
3817 
3818 	r = amdgpu_display_modeset_create_props(adev);
3819 	if (r) {
3820 		dc_release_state(state->context);
3821 		kfree(state);
3822 		return r;
3823 	}
3824 
3825 	r = amdgpu_dm_audio_init(adev);
3826 	if (r) {
3827 		dc_release_state(state->context);
3828 		kfree(state);
3829 		return r;
3830 	}
3831 
3832 	return 0;
3833 }
3834 
3835 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3836 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3837 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3838 
3839 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3840 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3841 
3842 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3843 					    int bl_idx)
3844 {
3845 #if defined(CONFIG_ACPI)
3846 	struct amdgpu_dm_backlight_caps caps;
3847 
3848 	memset(&caps, 0, sizeof(caps));
3849 
3850 	if (dm->backlight_caps[bl_idx].caps_valid)
3851 		return;
3852 
3853 	amdgpu_acpi_get_backlight_caps(&caps);
3854 	if (caps.caps_valid) {
3855 		dm->backlight_caps[bl_idx].caps_valid = true;
3856 		if (caps.aux_support)
3857 			return;
3858 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3859 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3860 	} else {
3861 		dm->backlight_caps[bl_idx].min_input_signal =
3862 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3863 		dm->backlight_caps[bl_idx].max_input_signal =
3864 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3865 	}
3866 #else
3867 	if (dm->backlight_caps[bl_idx].aux_support)
3868 		return;
3869 
3870 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3871 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3872 #endif
3873 }
3874 
3875 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3876 				unsigned *min, unsigned *max)
3877 {
3878 	if (!caps)
3879 		return 0;
3880 
3881 	if (caps->aux_support) {
3882 		// Firmware limits are in nits, DC API wants millinits.
3883 		*max = 1000 * caps->aux_max_input_signal;
3884 		*min = 1000 * caps->aux_min_input_signal;
3885 	} else {
3886 		// Firmware limits are 8-bit, PWM control is 16-bit.
3887 		*max = 0x101 * caps->max_input_signal;
3888 		*min = 0x101 * caps->min_input_signal;
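		// Note: 0x101 == 257, so the 0..255 firmware range maps onto the
		// full 16-bit PWM range exactly (257 * 255 = 65535 = 0xffff); the
		// default minimum of 12 becomes 257 * 12 = 3084.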
3889 	}
3890 	return 1;
3891 }
3892 
3893 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3894 					uint32_t brightness)
3895 {
3896 	unsigned min, max;
3897 
3898 	if (!get_brightness_range(caps, &min, &max))
3899 		return brightness;
3900 
3901 	// Rescale 0..255 to min..max
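	// e.g. user level 0 lands exactly on min and AMDGPU_MAX_BL_LEVEL lands
	// exactly on max; values in between are interpolated linearly, with
	// DIV_ROUND_CLOSEST avoiding truncation bias.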
3902 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3903 				       AMDGPU_MAX_BL_LEVEL);
3904 }
3905 
3906 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3907 				      uint32_t brightness)
3908 {
3909 	unsigned min, max;
3910 
3911 	if (!get_brightness_range(caps, &min, &max))
3912 		return brightness;
3913 
3914 	if (brightness < min)
3915 		return 0;
3916 	// Rescale min..max to 0..255
3917 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3918 				 max - min);
3919 }
3920 
3921 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3922 					 int bl_idx,
3923 					 u32 user_brightness)
3924 {
3925 	struct amdgpu_dm_backlight_caps caps;
3926 	struct dc_link *link;
3927 	u32 brightness;
3928 	bool rc;
3929 
3930 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3931 	caps = dm->backlight_caps[bl_idx];
3932 
3933 	dm->brightness[bl_idx] = user_brightness;
3934 	/* update scratch register */
3935 	if (bl_idx == 0)
3936 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3937 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3938 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3939 
3940 	/* Change brightness based on AUX property */
3941 	if (caps.aux_support) {
3942 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3943 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3944 		if (!rc)
3945 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3946 	} else {
3947 		rc = dc_link_set_backlight_level(link, brightness, 0);
3948 		if (!rc)
3949 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3950 	}
3951 
3952 	return rc ? 0 : 1;
3953 }
3954 
3955 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3956 {
3957 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3958 	int i;
3959 
3960 	for (i = 0; i < dm->num_of_edps; i++) {
3961 		if (bd == dm->backlight_dev[i])
3962 			break;
3963 	}
3964 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3965 		i = 0;
3966 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3967 
3968 	return 0;
3969 }
3970 
3971 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3972 					 int bl_idx)
3973 {
3974 	struct amdgpu_dm_backlight_caps caps;
3975 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3976 
3977 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3978 	caps = dm->backlight_caps[bl_idx];
3979 
3980 	if (caps.aux_support) {
3981 		u32 avg, peak;
3982 		bool rc;
3983 
3984 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3985 		if (!rc)
3986 			return dm->brightness[bl_idx];
3987 		return convert_brightness_to_user(&caps, avg);
3988 	} else {
3989 		int ret = dc_link_get_backlight_level(link);
3990 
3991 		if (ret == DC_ERROR_UNEXPECTED)
3992 			return dm->brightness[bl_idx];
3993 		return convert_brightness_to_user(&caps, ret);
3994 	}
3995 }
3996 
3997 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3998 {
3999 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4000 	int i;
4001 
4002 	for (i = 0; i < dm->num_of_edps; i++) {
4003 		if (bd == dm->backlight_dev[i])
4004 			break;
4005 	}
4006 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4007 		i = 0;
4008 	return amdgpu_dm_backlight_get_level(dm, i);
4009 }
4010 
4011 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4012 	.options = BL_CORE_SUSPENDRESUME,
4013 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4014 	.update_status	= amdgpu_dm_backlight_update_status,
4015 };
4016 
4017 static void
4018 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4019 {
4020 	char bl_name[16];
4021 	struct backlight_properties props = { 0 };
4022 
4023 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4024 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4025 
4026 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4027 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4028 	props.type = BACKLIGHT_RAW;
4029 
4030 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4031 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
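	/*
	 * On a typical single-GPU system the first panel therefore shows up
	 * as /sys/class/backlight/amdgpu_bl0, with any additional eDP links
	 * numbered upwards from there.
	 */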
4032 
4033 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4034 								       adev_to_drm(dm->adev)->dev,
4035 								       dm,
4036 								       &amdgpu_dm_backlight_ops,
4037 								       &props);
4038 
4039 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4040 		DRM_ERROR("DM: Backlight registration failed!\n");
4041 	else
4042 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4043 }
4044 #endif
4045 
4046 static int initialize_plane(struct amdgpu_display_manager *dm,
4047 			    struct amdgpu_mode_info *mode_info, int plane_id,
4048 			    enum drm_plane_type plane_type,
4049 			    const struct dc_plane_cap *plane_cap)
4050 {
4051 	struct drm_plane *plane;
4052 	unsigned long possible_crtcs;
4053 	int ret = 0;
4054 
4055 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4056 	if (!plane) {
4057 		DRM_ERROR("KMS: Failed to allocate plane\n");
4058 		return -ENOMEM;
4059 	}
4060 	plane->type = plane_type;
4061 
4062 	/*
4063 	 * HACK: IGT tests expect that the primary plane for a CRTC
4064 	 * can only have one possible CRTC. Only expose support for
4065 	 * any CRTC on planes that are not going to be used as a primary
4066 	 * plane for a CRTC - i.e. overlay or underlay planes.
4067 	 */
4068 	possible_crtcs = 1 << plane_id;
4069 	if (plane_id >= dm->dc->caps.max_streams)
4070 		possible_crtcs = 0xff;
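	/*
	 * e.g. with max_streams == 4, primary planes 0-3 each get
	 * possible_crtcs = BIT(plane_id), while an overlay plane with
	 * plane_id >= max_streams may be placed on any CRTC (0xff).
	 */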
4071 
4072 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4073 
4074 	if (ret) {
4075 		DRM_ERROR("KMS: Failed to initialize plane\n");
4076 		kfree(plane);
4077 		return ret;
4078 	}
4079 
4080 	if (mode_info)
4081 		mode_info->planes[plane_id] = plane;
4082 
4083 	return ret;
4084 }
4085 
4086 
4087 static void register_backlight_device(struct amdgpu_display_manager *dm,
4088 				      struct dc_link *link)
4089 {
4090 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4091 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4092 
4093 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4094 	    link->type != dc_connection_none) {
4095 		/*
4096 		 * Even if registration fails, we should continue with
4097 		 * DM initialization because not having a backlight control
4098 		 * is better than a black screen.
4099 		 */
4100 		if (!dm->backlight_dev[dm->num_of_edps])
4101 			amdgpu_dm_register_backlight_device(dm);
4102 
4103 		if (dm->backlight_dev[dm->num_of_edps]) {
4104 			dm->backlight_link[dm->num_of_edps] = link;
4105 			dm->num_of_edps++;
4106 		}
4107 	}
4108 #endif
4109 }
4110 
4111 
4112 /*
4113  * In this architecture, the association
4114  * connector -> encoder -> crtc
4115  * is not really required. The crtc and connector will hold the
4116  * display_index as an abstraction to use with the DAL component.
4117  *
4118  * Returns 0 on success
4119  */
4120 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4121 {
4122 	struct amdgpu_display_manager *dm = &adev->dm;
4123 	int32_t i;
4124 	struct amdgpu_dm_connector *aconnector = NULL;
4125 	struct amdgpu_encoder *aencoder = NULL;
4126 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4127 	uint32_t link_cnt;
4128 	int32_t primary_planes;
4129 	enum dc_connection_type new_connection_type = dc_connection_none;
4130 	const struct dc_plane_cap *plane;
4131 	bool psr_feature_enabled = false;
4132 
4133 	dm->display_indexes_num = dm->dc->caps.max_streams;
4134 	/* Update the actual number of CRTCs in use */
4135 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4136 
4137 	link_cnt = dm->dc->caps.max_links;
4138 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4139 		DRM_ERROR("DM: Failed to initialize mode config\n");
4140 		return -EINVAL;
4141 	}
4142 
4143 	/* There is one primary plane per CRTC */
4144 	primary_planes = dm->dc->caps.max_streams;
4145 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4146 
4147 	/*
4148 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4149 	 * Order is reversed to match iteration order in atomic check.
4150 	 */
4151 	for (i = (primary_planes - 1); i >= 0; i--) {
4152 		plane = &dm->dc->caps.planes[i];
4153 
4154 		if (initialize_plane(dm, mode_info, i,
4155 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4156 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4157 			goto fail;
4158 		}
4159 	}
4160 
4161 	/*
4162 	 * Initialize overlay planes, index starting after primary planes.
4163 	 * These planes have a higher DRM index than the primary planes since
4164 	 * they should be considered as having a higher z-order.
4165 	 * Order is reversed to match iteration order in atomic check.
4166 	 *
4167 	 * Only support DCN for now, and only expose one so we don't encourage
4168 	 * userspace to use up all the pipes.
4169 	 */
4170 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4171 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4172 
4173 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4174 			continue;
4175 
4176 		if (!plane->blends_with_above || !plane->blends_with_below)
4177 			continue;
4178 
4179 		if (!plane->pixel_format_support.argb8888)
4180 			continue;
4181 
4182 		if (initialize_plane(dm, NULL, primary_planes + i,
4183 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4184 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4185 			goto fail;
4186 		}
4187 
4188 		/* Only create one overlay plane. */
4189 		break;
4190 	}
4191 
4192 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4193 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4194 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4195 			goto fail;
4196 		}
4197 
4198 #if defined(CONFIG_DRM_AMD_DC_DCN)
4199 	/* Use Outbox interrupt */
4200 	switch (adev->ip_versions[DCE_HWIP][0]) {
4201 	case IP_VERSION(3, 0, 0):
4202 	case IP_VERSION(3, 1, 2):
4203 	case IP_VERSION(3, 1, 3):
4204 	case IP_VERSION(2, 1, 0):
4205 		if (register_outbox_irq_handlers(dm->adev)) {
4206 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4207 			goto fail;
4208 		}
4209 		break;
4210 	default:
4211 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4212 			      adev->ip_versions[DCE_HWIP][0]);
4213 	}
4214 
4215 	/* Determine whether to enable PSR support by default. */
4216 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4217 		switch (adev->ip_versions[DCE_HWIP][0]) {
4218 		case IP_VERSION(3, 1, 2):
4219 		case IP_VERSION(3, 1, 3):
4220 			psr_feature_enabled = true;
4221 			break;
4222 		default:
4223 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4224 			break;
4225 		}
4226 	}
4227 #endif
4228 
4229 	/* loops over all connectors on the board */
4230 	for (i = 0; i < link_cnt; i++) {
4231 		struct dc_link *link = NULL;
4232 
4233 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4234 			DRM_ERROR(
4235 				"KMS: Cannot support more than %d display indexes\n",
4236 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4237 			continue;
4238 		}
4239 
4240 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4241 		if (!aconnector)
4242 			goto fail;
4243 
4244 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4245 		if (!aencoder)
4246 			goto fail;
4247 
4248 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4249 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4250 			goto fail;
4251 		}
4252 
4253 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4254 			DRM_ERROR("KMS: Failed to initialize connector\n");
4255 			goto fail;
4256 		}
4257 
4258 		link = dc_get_link_at_index(dm->dc, i);
4259 
4260 		if (!dc_link_detect_sink(link, &new_connection_type))
4261 			DRM_ERROR("KMS: Failed to detect connector\n");
4262 
4263 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4264 			emulated_link_detect(link);
4265 			amdgpu_dm_update_connector_after_detect(aconnector);
4266 
4267 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4268 			amdgpu_dm_update_connector_after_detect(aconnector);
4269 			register_backlight_device(dm, link);
4270 			if (dm->num_of_edps)
4271 				update_connector_ext_caps(aconnector);
4272 			if (psr_feature_enabled)
4273 				amdgpu_dm_set_psr_caps(link);
4274 		}
4275 
4276 
4277 	}
4278 
4279 	/* Software is initialized. Now we can register interrupt handlers. */
4280 	switch (adev->asic_type) {
4281 #if defined(CONFIG_DRM_AMD_DC_SI)
4282 	case CHIP_TAHITI:
4283 	case CHIP_PITCAIRN:
4284 	case CHIP_VERDE:
4285 	case CHIP_OLAND:
4286 		if (dce60_register_irq_handlers(dm->adev)) {
4287 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4288 			goto fail;
4289 		}
4290 		break;
4291 #endif
4292 	case CHIP_BONAIRE:
4293 	case CHIP_HAWAII:
4294 	case CHIP_KAVERI:
4295 	case CHIP_KABINI:
4296 	case CHIP_MULLINS:
4297 	case CHIP_TONGA:
4298 	case CHIP_FIJI:
4299 	case CHIP_CARRIZO:
4300 	case CHIP_STONEY:
4301 	case CHIP_POLARIS11:
4302 	case CHIP_POLARIS10:
4303 	case CHIP_POLARIS12:
4304 	case CHIP_VEGAM:
4305 	case CHIP_VEGA10:
4306 	case CHIP_VEGA12:
4307 	case CHIP_VEGA20:
4308 		if (dce110_register_irq_handlers(dm->adev)) {
4309 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4310 			goto fail;
4311 		}
4312 		break;
4313 	default:
4314 #if defined(CONFIG_DRM_AMD_DC_DCN)
4315 		switch (adev->ip_versions[DCE_HWIP][0]) {
4316 		case IP_VERSION(1, 0, 0):
4317 		case IP_VERSION(1, 0, 1):
4318 		case IP_VERSION(2, 0, 2):
4319 		case IP_VERSION(2, 0, 3):
4320 		case IP_VERSION(2, 0, 0):
4321 		case IP_VERSION(2, 1, 0):
4322 		case IP_VERSION(3, 0, 0):
4323 		case IP_VERSION(3, 0, 2):
4324 		case IP_VERSION(3, 0, 3):
4325 		case IP_VERSION(3, 0, 1):
4326 		case IP_VERSION(3, 1, 2):
4327 		case IP_VERSION(3, 1, 3):
4328 			if (dcn10_register_irq_handlers(dm->adev)) {
4329 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4330 				goto fail;
4331 			}
4332 			break;
4333 		default:
4334 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4335 					adev->ip_versions[DCE_HWIP][0]);
4336 			goto fail;
4337 		}
4338 #endif
4339 		break;
4340 	}
4341 
4342 	return 0;
4343 fail:
4344 	kfree(aencoder);
4345 	kfree(aconnector);
4346 
4347 	return -EINVAL;
4348 }
4349 
4350 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4351 {
4352 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4353 	return;
4354 }
4355 
4356 /******************************************************************************
4357  * amdgpu_display_funcs functions
4358  *****************************************************************************/
4359 
4360 /*
4361  * dm_bandwidth_update - program display watermarks
4362  *
4363  * @adev: amdgpu_device pointer
4364  *
4365  * Calculate and program the display watermarks and line buffer allocation.
4366  */
4367 static void dm_bandwidth_update(struct amdgpu_device *adev)
4368 {
4369 	/* TODO: implement later */
4370 }
4371 
4372 static const struct amdgpu_display_funcs dm_display_funcs = {
4373 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4374 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4375 	.backlight_set_level = NULL, /* never called for DC */
4376 	.backlight_get_level = NULL, /* never called for DC */
4377 	.hpd_sense = NULL,/* called unconditionally */
4378 	.hpd_set_polarity = NULL, /* called unconditionally */
4379 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4380 	.page_flip_get_scanoutpos =
4381 		dm_crtc_get_scanoutpos,/* called unconditionally */
4382 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4383 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4384 };
4385 
4386 #if defined(CONFIG_DEBUG_KERNEL_DC)
4387 
4388 static ssize_t s3_debug_store(struct device *device,
4389 			      struct device_attribute *attr,
4390 			      const char *buf,
4391 			      size_t count)
4392 {
4393 	int ret;
4394 	int s3_state;
4395 	struct drm_device *drm_dev = dev_get_drvdata(device);
4396 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4397 
4398 	ret = kstrtoint(buf, 0, &s3_state);
4399 
4400 	if (ret == 0) {
4401 		if (s3_state) {
4402 			dm_resume(adev);
4403 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4404 		} else
4405 			dm_suspend(adev);
4406 	}
4407 
4408 	return ret == 0 ? count : 0;
4409 }
4410 
4411 DEVICE_ATTR_WO(s3_debug);
4412 
4413 #endif
4414 
4415 static int dm_early_init(void *handle)
4416 {
4417 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4418 
4419 	switch (adev->asic_type) {
4420 #if defined(CONFIG_DRM_AMD_DC_SI)
4421 	case CHIP_TAHITI:
4422 	case CHIP_PITCAIRN:
4423 	case CHIP_VERDE:
4424 		adev->mode_info.num_crtc = 6;
4425 		adev->mode_info.num_hpd = 6;
4426 		adev->mode_info.num_dig = 6;
4427 		break;
4428 	case CHIP_OLAND:
4429 		adev->mode_info.num_crtc = 2;
4430 		adev->mode_info.num_hpd = 2;
4431 		adev->mode_info.num_dig = 2;
4432 		break;
4433 #endif
4434 	case CHIP_BONAIRE:
4435 	case CHIP_HAWAII:
4436 		adev->mode_info.num_crtc = 6;
4437 		adev->mode_info.num_hpd = 6;
4438 		adev->mode_info.num_dig = 6;
4439 		break;
4440 	case CHIP_KAVERI:
4441 		adev->mode_info.num_crtc = 4;
4442 		adev->mode_info.num_hpd = 6;
4443 		adev->mode_info.num_dig = 7;
4444 		break;
4445 	case CHIP_KABINI:
4446 	case CHIP_MULLINS:
4447 		adev->mode_info.num_crtc = 2;
4448 		adev->mode_info.num_hpd = 6;
4449 		adev->mode_info.num_dig = 6;
4450 		break;
4451 	case CHIP_FIJI:
4452 	case CHIP_TONGA:
4453 		adev->mode_info.num_crtc = 6;
4454 		adev->mode_info.num_hpd = 6;
4455 		adev->mode_info.num_dig = 7;
4456 		break;
4457 	case CHIP_CARRIZO:
4458 		adev->mode_info.num_crtc = 3;
4459 		adev->mode_info.num_hpd = 6;
4460 		adev->mode_info.num_dig = 9;
4461 		break;
4462 	case CHIP_STONEY:
4463 		adev->mode_info.num_crtc = 2;
4464 		adev->mode_info.num_hpd = 6;
4465 		adev->mode_info.num_dig = 9;
4466 		break;
4467 	case CHIP_POLARIS11:
4468 	case CHIP_POLARIS12:
4469 		adev->mode_info.num_crtc = 5;
4470 		adev->mode_info.num_hpd = 5;
4471 		adev->mode_info.num_dig = 5;
4472 		break;
4473 	case CHIP_POLARIS10:
4474 	case CHIP_VEGAM:
4475 		adev->mode_info.num_crtc = 6;
4476 		adev->mode_info.num_hpd = 6;
4477 		adev->mode_info.num_dig = 6;
4478 		break;
4479 	case CHIP_VEGA10:
4480 	case CHIP_VEGA12:
4481 	case CHIP_VEGA20:
4482 		adev->mode_info.num_crtc = 6;
4483 		adev->mode_info.num_hpd = 6;
4484 		adev->mode_info.num_dig = 6;
4485 		break;
4486 	default:
4487 #if defined(CONFIG_DRM_AMD_DC_DCN)
4488 		switch (adev->ip_versions[DCE_HWIP][0]) {
4489 		case IP_VERSION(2, 0, 2):
4490 		case IP_VERSION(3, 0, 0):
4491 			adev->mode_info.num_crtc = 6;
4492 			adev->mode_info.num_hpd = 6;
4493 			adev->mode_info.num_dig = 6;
4494 			break;
4495 		case IP_VERSION(2, 0, 0):
4496 		case IP_VERSION(3, 0, 2):
4497 			adev->mode_info.num_crtc = 5;
4498 			adev->mode_info.num_hpd = 5;
4499 			adev->mode_info.num_dig = 5;
4500 			break;
4501 		case IP_VERSION(2, 0, 3):
4502 		case IP_VERSION(3, 0, 3):
4503 			adev->mode_info.num_crtc = 2;
4504 			adev->mode_info.num_hpd = 2;
4505 			adev->mode_info.num_dig = 2;
4506 			break;
4507 		case IP_VERSION(1, 0, 0):
4508 		case IP_VERSION(1, 0, 1):
4509 		case IP_VERSION(3, 0, 1):
4510 		case IP_VERSION(2, 1, 0):
4511 		case IP_VERSION(3, 1, 2):
4512 		case IP_VERSION(3, 1, 3):
4513 			adev->mode_info.num_crtc = 4;
4514 			adev->mode_info.num_hpd = 4;
4515 			adev->mode_info.num_dig = 4;
4516 			break;
4517 		default:
4518 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4519 					adev->ip_versions[DCE_HWIP][0]);
4520 			return -EINVAL;
4521 		}
4522 #endif
4523 		break;
4524 	}
4525 
4526 	amdgpu_dm_set_irq_funcs(adev);
4527 
4528 	if (adev->mode_info.funcs == NULL)
4529 		adev->mode_info.funcs = &dm_display_funcs;
4530 
4531 	/*
4532 	 * Note: Do NOT change adev->audio_endpt_rreg and
4533 	 * adev->audio_endpt_wreg because they are initialised in
4534 	 * amdgpu_device_init()
4535 	 */
4536 #if defined(CONFIG_DEBUG_KERNEL_DC)
4537 	device_create_file(
4538 		adev_to_drm(adev)->dev,
4539 		&dev_attr_s3_debug);
4540 #endif
4541 
4542 	return 0;
4543 }
4544 
4545 static bool modeset_required(struct drm_crtc_state *crtc_state,
4546 			     struct dc_stream_state *new_stream,
4547 			     struct dc_stream_state *old_stream)
4548 {
4549 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4550 }
4551 
4552 static bool modereset_required(struct drm_crtc_state *crtc_state)
4553 {
4554 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4555 }
4556 
4557 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4558 {
4559 	drm_encoder_cleanup(encoder);
4560 	kfree(encoder);
4561 }
4562 
4563 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4564 	.destroy = amdgpu_dm_encoder_destroy,
4565 };
4566 
4567 
4568 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4569 					 struct drm_framebuffer *fb,
4570 					 int *min_downscale, int *max_upscale)
4571 {
4572 	struct amdgpu_device *adev = drm_to_adev(dev);
4573 	struct dc *dc = adev->dm.dc;
4574 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4575 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4576 
4577 	switch (fb->format->format) {
4578 	case DRM_FORMAT_P010:
4579 	case DRM_FORMAT_NV12:
4580 	case DRM_FORMAT_NV21:
4581 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4582 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4583 		break;
4584 
4585 	case DRM_FORMAT_XRGB16161616F:
4586 	case DRM_FORMAT_ARGB16161616F:
4587 	case DRM_FORMAT_XBGR16161616F:
4588 	case DRM_FORMAT_ABGR16161616F:
4589 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4590 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4591 		break;
4592 
4593 	default:
4594 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4595 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4596 		break;
4597 	}
4598 
4599 	/*
4600 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4601 	 * scaling factor of 1.0 == 1000 units.
4602 	 */
4603 	if (*max_upscale == 1)
4604 		*max_upscale = 1000;
4605 
4606 	if (*min_downscale == 1)
4607 		*min_downscale = 1000;
4608 }
4609 
4610 
4611 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4612 				const struct drm_plane_state *state,
4613 				struct dc_scaling_info *scaling_info)
4614 {
4615 	int scale_w, scale_h, min_downscale, max_upscale;
4616 
4617 	memset(scaling_info, 0, sizeof(*scaling_info));
4618 
4619 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4620 	scaling_info->src_rect.x = state->src_x >> 16;
4621 	scaling_info->src_rect.y = state->src_y >> 16;
4622 
4623 	/*
4624 	 * For reasons we don't (yet) fully understand, a non-zero
4625 	 * src_y coordinate into an NV12 buffer can cause a
4626 	 * system hang on DCN1x.
4627 	 * To avoid hangs (and maybe be overly cautious)
4628 	 * let's reject both non-zero src_x and src_y.
4629 	 *
4630 	 * We currently know of only one use-case to reproduce a
4631 	 * scenario with non-zero src_x and src_y for NV12, which
4632 	 * is to gesture the YouTube Android app into full screen
4633 	 * on ChromeOS.
4634 	 */
4635 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4636 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4637 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4638 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4639 		return -EINVAL;
4640 
4641 	scaling_info->src_rect.width = state->src_w >> 16;
4642 	if (scaling_info->src_rect.width == 0)
4643 		return -EINVAL;
4644 
4645 	scaling_info->src_rect.height = state->src_h >> 16;
4646 	if (scaling_info->src_rect.height == 0)
4647 		return -EINVAL;
4648 
4649 	scaling_info->dst_rect.x = state->crtc_x;
4650 	scaling_info->dst_rect.y = state->crtc_y;
4651 
4652 	if (state->crtc_w == 0)
4653 		return -EINVAL;
4654 
4655 	scaling_info->dst_rect.width = state->crtc_w;
4656 
4657 	if (state->crtc_h == 0)
4658 		return -EINVAL;
4659 
4660 	scaling_info->dst_rect.height = state->crtc_h;
4661 
4662 	/* DRM doesn't specify clipping on destination output. */
4663 	scaling_info->clip_rect = scaling_info->dst_rect;
4664 
4665 	/* Validate scaling per-format with DC plane caps */
4666 	if (state->plane && state->plane->dev && state->fb) {
4667 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4668 					     &min_downscale, &max_upscale);
4669 	} else {
4670 		min_downscale = 250;
4671 		max_upscale = 16000;
4672 	}
4673 
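	/*
	 * Scale factors are expressed in 1/1000 units: e.g. a 1920-wide source
	 * shown on a 960-wide destination gives scale_w = 960 * 1000 / 1920 =
	 * 500, a 2:1 downscale, which is accepted as long as the plane's
	 * min_downscale is at most 500.
	 */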
4674 	scale_w = scaling_info->dst_rect.width * 1000 /
4675 		  scaling_info->src_rect.width;
4676 
4677 	if (scale_w < min_downscale || scale_w > max_upscale)
4678 		return -EINVAL;
4679 
4680 	scale_h = scaling_info->dst_rect.height * 1000 /
4681 		  scaling_info->src_rect.height;
4682 
4683 	if (scale_h < min_downscale || scale_h > max_upscale)
4684 		return -EINVAL;
4685 
4686 	/*
4687 	 * The "scaling_quality" can be ignored for now; with quality = 0, DC
4688 	 * assumes reasonable defaults based on the format.
4689 	 */
4690 
4691 	return 0;
4692 }
4693 
4694 static void
4695 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4696 				 uint64_t tiling_flags)
4697 {
4698 	/* Fill GFX8 params */
4699 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4700 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4701 
4702 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4703 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4704 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4705 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4706 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4707 
4708 		/* XXX fix me for VI */
4709 		tiling_info->gfx8.num_banks = num_banks;
4710 		tiling_info->gfx8.array_mode =
4711 				DC_ARRAY_2D_TILED_THIN1;
4712 		tiling_info->gfx8.tile_split = tile_split;
4713 		tiling_info->gfx8.bank_width = bankw;
4714 		tiling_info->gfx8.bank_height = bankh;
4715 		tiling_info->gfx8.tile_aspect = mtaspect;
4716 		tiling_info->gfx8.tile_mode =
4717 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4718 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4719 			== DC_ARRAY_1D_TILED_THIN1) {
4720 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4721 	}
4722 
4723 	tiling_info->gfx8.pipe_config =
4724 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4725 }
4726 
4727 static void
4728 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4729 				  union dc_tiling_info *tiling_info)
4730 {
4731 	tiling_info->gfx9.num_pipes =
4732 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4733 	tiling_info->gfx9.num_banks =
4734 		adev->gfx.config.gb_addr_config_fields.num_banks;
4735 	tiling_info->gfx9.pipe_interleave =
4736 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4737 	tiling_info->gfx9.num_shader_engines =
4738 		adev->gfx.config.gb_addr_config_fields.num_se;
4739 	tiling_info->gfx9.max_compressed_frags =
4740 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4741 	tiling_info->gfx9.num_rb_per_se =
4742 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4743 	tiling_info->gfx9.shaderEnable = 1;
4744 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4745 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4746 }
4747 
4748 static int
4749 validate_dcc(struct amdgpu_device *adev,
4750 	     const enum surface_pixel_format format,
4751 	     const enum dc_rotation_angle rotation,
4752 	     const union dc_tiling_info *tiling_info,
4753 	     const struct dc_plane_dcc_param *dcc,
4754 	     const struct dc_plane_address *address,
4755 	     const struct plane_size *plane_size)
4756 {
4757 	struct dc *dc = adev->dm.dc;
4758 	struct dc_dcc_surface_param input;
4759 	struct dc_surface_dcc_cap output;
4760 
4761 	memset(&input, 0, sizeof(input));
4762 	memset(&output, 0, sizeof(output));
4763 
4764 	if (!dcc->enable)
4765 		return 0;
4766 
4767 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4768 	    !dc->cap_funcs.get_dcc_compression_cap)
4769 		return -EINVAL;
4770 
4771 	input.format = format;
4772 	input.surface_size.width = plane_size->surface_size.width;
4773 	input.surface_size.height = plane_size->surface_size.height;
4774 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4775 
4776 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4777 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4778 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4779 		input.scan = SCAN_DIRECTION_VERTICAL;
4780 
4781 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4782 		return -EINVAL;
4783 
4784 	if (!output.capable)
4785 		return -EINVAL;
4786 
4787 	if (dcc->independent_64b_blks == 0 &&
4788 	    output.grph.rgb.independent_64b_blks != 0)
4789 		return -EINVAL;
4790 
4791 	return 0;
4792 }
4793 
4794 static bool
4795 modifier_has_dcc(uint64_t modifier)
4796 {
4797 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4798 }
4799 
4800 static unsigned
4801 modifier_gfx9_swizzle_mode(uint64_t modifier)
4802 {
4803 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4804 		return 0;
4805 
4806 	return AMD_FMT_MOD_GET(TILE, modifier);
4807 }
4808 
4809 static const struct drm_format_info *
4810 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4811 {
4812 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4813 }
4814 
4815 static void
4816 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4817 				    union dc_tiling_info *tiling_info,
4818 				    uint64_t modifier)
4819 {
4820 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4821 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4822 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4823 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4824 
4825 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4826 
4827 	if (!IS_AMD_FMT_MOD(modifier))
4828 		return;
4829 
4830 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4831 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4832 
4833 	if (adev->family >= AMDGPU_FAMILY_NV) {
4834 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4835 	} else {
4836 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4837 
4838 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4839 	}
4840 }
4841 
4842 enum dm_micro_swizzle {
4843 	MICRO_SWIZZLE_Z = 0,
4844 	MICRO_SWIZZLE_S = 1,
4845 	MICRO_SWIZZLE_D = 2,
4846 	MICRO_SWIZZLE_R = 3
4847 };
4848 
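/*
 * The two lowest bits of a gfx9+ swizzle mode select the micro-tile ordering,
 * which is why dm_plane_format_mod_supported() below can recover it as
 * (modifier_gfx9_swizzle_mode(modifier) & 3) and compare it against the
 * dm_micro_swizzle values above.
 */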
4849 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4850 					  uint32_t format,
4851 					  uint64_t modifier)
4852 {
4853 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4854 	const struct drm_format_info *info = drm_format_info(format);
4855 	int i;
4856 
4857 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4858 
4859 	if (!info)
4860 		return false;
4861 
4862 	/*
4863 	 * We always have to allow these modifiers:
4864 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4865 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4866 	 */
4867 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4868 	    modifier == DRM_FORMAT_MOD_INVALID) {
4869 		return true;
4870 	}
4871 
4872 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4873 	for (i = 0; i < plane->modifier_count; i++) {
4874 		if (modifier == plane->modifiers[i])
4875 			break;
4876 	}
4877 	if (i == plane->modifier_count)
4878 		return false;
4879 
4880 	/*
4881 	 * For D swizzle the canonical modifier depends on the bpp, so check
4882 	 * it here.
4883 	 */
4884 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4885 	    adev->family >= AMDGPU_FAMILY_NV) {
4886 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4887 			return false;
4888 	}
4889 
4890 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4891 	    info->cpp[0] < 8)
4892 		return false;
4893 
4894 	if (modifier_has_dcc(modifier)) {
4895 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4896 		if (info->cpp[0] != 4)
4897 			return false;
4898 		/* We support multi-planar formats, but not when combined with
4899 		 * additional DCC metadata planes. */
4900 		if (info->num_planes > 1)
4901 			return false;
4902 	}
4903 
4904 	return true;
4905 }
4906 
4907 static void
4908 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4909 {
4910 	if (!*mods)
4911 		return;
4912 
4913 	if (*cap - *size < 1) {
4914 		uint64_t new_cap = *cap * 2;
4915 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4916 
4917 		if (!new_mods) {
4918 			kfree(*mods);
4919 			*mods = NULL;
4920 			return;
4921 		}
4922 
4923 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4924 		kfree(*mods);
4925 		*mods = new_mods;
4926 		*cap = new_cap;
4927 	}
4928 
4929 	(*mods)[*size] = mod;
4930 	*size += 1;
4931 }
4932 
4933 static void
4934 add_gfx9_modifiers(const struct amdgpu_device *adev,
4935 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4936 {
4937 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4938 	int pipe_xor_bits = min(8, pipes +
4939 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4940 	int bank_xor_bits = min(8 - pipe_xor_bits,
4941 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4942 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4943 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4944 
4945 
4946 	if (adev->family == AMDGPU_FAMILY_RV) {
4947 		/* Raven2 and later */
4948 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4949 
4950 		/*
4951 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4952 		 * doesn't support _D on DCN
4953 		 */
4954 
4955 		if (has_constant_encode) {
4956 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4957 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4958 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4959 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4960 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4961 				    AMD_FMT_MOD_SET(DCC, 1) |
4962 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4963 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4964 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4965 		}
4966 
4967 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4968 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4969 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4970 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4971 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4972 			    AMD_FMT_MOD_SET(DCC, 1) |
4973 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4974 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4975 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4976 
4977 		if (has_constant_encode) {
4978 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4979 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4980 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4981 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4982 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4983 				    AMD_FMT_MOD_SET(DCC, 1) |
4984 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4985 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4986 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4987 
4988 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4989 				    AMD_FMT_MOD_SET(RB, rb) |
4990 				    AMD_FMT_MOD_SET(PIPE, pipes));
4991 		}
4992 
4993 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4994 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4995 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4996 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4997 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4998 			    AMD_FMT_MOD_SET(DCC, 1) |
4999 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5000 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5001 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5002 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5003 			    AMD_FMT_MOD_SET(RB, rb) |
5004 			    AMD_FMT_MOD_SET(PIPE, pipes));
5005 	}
5006 
5007 	/*
5008 	 * Only supported for 64bpp on Raven, will be filtered on format in
5009 	 * dm_plane_format_mod_supported.
5010 	 */
5011 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5012 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5013 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5014 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5015 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5016 
5017 	if (adev->family == AMDGPU_FAMILY_RV) {
5018 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5019 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5020 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5021 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5022 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5023 	}
5024 
5025 	/*
5026 	 * Only supported for 64bpp on Raven, will be filtered on format in
5027 	 * dm_plane_format_mod_supported.
5028 	 */
5029 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5030 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5031 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5032 
5033 	if (adev->family == AMDGPU_FAMILY_RV) {
5034 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5035 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5036 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5037 	}
5038 }
5039 
5040 static void
5041 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5042 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5043 {
5044 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5045 
5046 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5047 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5048 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5049 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5050 		    AMD_FMT_MOD_SET(DCC, 1) |
5051 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5052 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5053 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5054 
5055 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5056 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5057 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5058 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5059 		    AMD_FMT_MOD_SET(DCC, 1) |
5060 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5061 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5062 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5063 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5064 
5065 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5066 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5067 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5068 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5069 
5070 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5071 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5072 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5073 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5074 
5075 
5076 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5077 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5078 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5079 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5080 
5081 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5082 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5083 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5084 }
5085 
5086 static void
5087 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5088 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5089 {
5090 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5091 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5092 
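	/*
	 * The modifiers below encode log2 of the pipe and packer counts
	 * (PIPE_XOR_BITS/PACKERS) so userspace can reproduce the exact
	 * swizzle layout used by this ASIC.
	 */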
5093 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5094 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5095 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5096 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5097 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5098 		    AMD_FMT_MOD_SET(DCC, 1) |
5099 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5100 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5101 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5102 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5103 
5104 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5105 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5106 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5107 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5108 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5109 		    AMD_FMT_MOD_SET(DCC, 1) |
5110 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5111 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5112 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5113 
5114 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5115 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5116 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5117 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5118 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5119 		    AMD_FMT_MOD_SET(DCC, 1) |
5120 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5121 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5122 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5123 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5124 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5125 
5126 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5127 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5128 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5129 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5130 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5131 		    AMD_FMT_MOD_SET(DCC, 1) |
5132 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5133 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5134 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5135 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5136 
5137 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5138 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5139 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5140 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5141 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5142 
5143 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5144 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5145 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5146 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5147 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5148 
5149 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5150 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5151 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5152 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5153 
5154 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5155 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5156 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5157 }
5158 
5159 static int
5160 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5161 {
5162 	uint64_t size = 0, capacity = 128;

5163 	*mods = NULL;
5164 
5165 	/* We have not hooked up any pre-GFX9 modifiers. */
5166 	if (adev->family < AMDGPU_FAMILY_AI)
5167 		return 0;
5168 
5169 	*mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
5170 
5171 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5172 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5173 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5174 		return *mods ? 0 : -ENOMEM;
5175 	}
5176 
5177 	switch (adev->family) {
5178 	case AMDGPU_FAMILY_AI:
5179 	case AMDGPU_FAMILY_RV:
5180 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5181 		break;
5182 	case AMDGPU_FAMILY_NV:
5183 	case AMDGPU_FAMILY_VGH:
5184 	case AMDGPU_FAMILY_YC:
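		/* gfx10.3 and newer have RB+ and use the extended modifier set. */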
5185 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5186 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5187 		else
5188 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5189 		break;
5190 	}
5191 
5192 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5193 
5194 	/* INVALID marks the end of the list. */
5195 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5196 
5197 	if (!*mods)
5198 		return -ENOMEM;
5199 
5200 	return 0;
5201 }
5202 
5203 static int
5204 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5205 					  const struct amdgpu_framebuffer *afb,
5206 					  const enum surface_pixel_format format,
5207 					  const enum dc_rotation_angle rotation,
5208 					  const struct plane_size *plane_size,
5209 					  union dc_tiling_info *tiling_info,
5210 					  struct dc_plane_dcc_param *dcc,
5211 					  struct dc_plane_address *address,
5212 					  const bool force_disable_dcc)
5213 {
5214 	const uint64_t modifier = afb->base.modifier;
5215 	int ret = 0;
5216 
5217 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5218 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5219 
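	/*
	 * The DCC metadata surface is described by plane 1 of the framebuffer:
	 * its offset and pitch are programmed below as the meta address/pitch.
	 */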
5220 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5221 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5222 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5223 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5224 
5225 		dcc->enable = 1;
5226 		dcc->meta_pitch = afb->base.pitches[1];
5227 		dcc->independent_64b_blks = independent_64b_blks;
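		/*
		 * Map the independent block flags onto the HUBP setting: RB+
		 * (gfx10.3+) ASICs may combine 64B and 128B constraints, while
		 * older ASICs only understand the 64B constraint.
		 */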
5228 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5229 			if (independent_64b_blks && independent_128b_blks)
5230 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5231 			else if (independent_128b_blks)
5232 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5233 			else if (independent_64b_blks && !independent_128b_blks)
5234 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5235 			else
5236 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5237 		} else {
5238 			if (independent_64b_blks)
5239 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5240 			else
5241 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5242 		}
5243 
5244 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5245 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5246 	}
5247 
5248 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5249 	if (ret)
5250 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5251 
5252 	return ret;
5253 }
5254 
5255 static int
5256 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5257 			     const struct amdgpu_framebuffer *afb,
5258 			     const enum surface_pixel_format format,
5259 			     const enum dc_rotation_angle rotation,
5260 			     const uint64_t tiling_flags,
5261 			     union dc_tiling_info *tiling_info,
5262 			     struct plane_size *plane_size,
5263 			     struct dc_plane_dcc_param *dcc,
5264 			     struct dc_plane_address *address,
5265 			     bool tmz_surface,
5266 			     bool force_disable_dcc)
5267 {
5268 	const struct drm_framebuffer *fb = &afb->base;
5269 	int ret;
5270 
5271 	memset(tiling_info, 0, sizeof(*tiling_info));
5272 	memset(plane_size, 0, sizeof(*plane_size));
5273 	memset(dcc, 0, sizeof(*dcc));
5274 	memset(address, 0, sizeof(*address));
5275 
5276 	address->tmz_surface = tmz_surface;
5277 
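	/*
	 * Graphics (RGB) surfaces use a single plane, while video surfaces are
	 * semi-planar with separate luma and chroma planes.
	 */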
5278 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5279 		uint64_t addr = afb->address + fb->offsets[0];
5280 
5281 		plane_size->surface_size.x = 0;
5282 		plane_size->surface_size.y = 0;
5283 		plane_size->surface_size.width = fb->width;
5284 		plane_size->surface_size.height = fb->height;
5285 		plane_size->surface_pitch =
5286 			fb->pitches[0] / fb->format->cpp[0];
5287 
5288 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5289 		address->grph.addr.low_part = lower_32_bits(addr);
5290 		address->grph.addr.high_part = upper_32_bits(addr);
5291 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5292 		uint64_t luma_addr = afb->address + fb->offsets[0];
5293 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5294 
5295 		plane_size->surface_size.x = 0;
5296 		plane_size->surface_size.y = 0;
5297 		plane_size->surface_size.width = fb->width;
5298 		plane_size->surface_size.height = fb->height;
5299 		plane_size->surface_pitch =
5300 			fb->pitches[0] / fb->format->cpp[0];
5301 
5302 		plane_size->chroma_size.x = 0;
5303 		plane_size->chroma_size.y = 0;
5304 		/* TODO: set these based on surface format */
5305 		plane_size->chroma_size.width = fb->width / 2;
5306 		plane_size->chroma_size.height = fb->height / 2;
5307 
5308 		plane_size->chroma_pitch =
5309 			fb->pitches[1] / fb->format->cpp[1];
5310 
5311 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5312 		address->video_progressive.luma_addr.low_part =
5313 			lower_32_bits(luma_addr);
5314 		address->video_progressive.luma_addr.high_part =
5315 			upper_32_bits(luma_addr);
5316 		address->video_progressive.chroma_addr.low_part =
5317 			lower_32_bits(chroma_addr);
5318 		address->video_progressive.chroma_addr.high_part =
5319 			upper_32_bits(chroma_addr);
5320 	}
5321 
5322 	if (adev->family >= AMDGPU_FAMILY_AI) {
5323 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5324 								rotation, plane_size,
5325 								tiling_info, dcc,
5326 								address,
5327 								force_disable_dcc);
5328 		if (ret)
5329 			return ret;
5330 	} else {
5331 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5332 	}
5333 
5334 	return 0;
5335 }
5336 
5337 static void
5338 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5339 			       bool *per_pixel_alpha, bool *global_alpha,
5340 			       int *global_alpha_value)
5341 {
5342 	*per_pixel_alpha = false;
5343 	*global_alpha = false;
5344 	*global_alpha_value = 0xff;
5345 
5346 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5347 		return;
5348 
5349 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5350 		static const uint32_t alpha_formats[] = {
5351 			DRM_FORMAT_ARGB8888,
5352 			DRM_FORMAT_RGBA8888,
5353 			DRM_FORMAT_ABGR8888,
5354 		};
5355 		uint32_t format = plane_state->fb->format->format;
5356 		unsigned int i;
5357 
5358 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5359 			if (format == alpha_formats[i]) {
5360 				*per_pixel_alpha = true;
5361 				break;
5362 			}
5363 		}
5364 	}
5365 
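	/*
	 * DRM exposes a 16-bit plane alpha while DC takes an 8-bit global
	 * alpha, so scale it down and only flag it when not fully opaque.
	 */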
5366 	if (plane_state->alpha < 0xffff) {
5367 		*global_alpha = true;
5368 		*global_alpha_value = plane_state->alpha >> 8;
5369 	}
5370 }
5371 
5372 static int
5373 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5374 			    const enum surface_pixel_format format,
5375 			    enum dc_color_space *color_space)
5376 {
5377 	bool full_range;
5378 
5379 	*color_space = COLOR_SPACE_SRGB;
5380 
5381 	/* DRM color properties only affect non-RGB formats. */
5382 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5383 		return 0;
5384 
5385 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5386 
5387 	switch (plane_state->color_encoding) {
5388 	case DRM_COLOR_YCBCR_BT601:
5389 		if (full_range)
5390 			*color_space = COLOR_SPACE_YCBCR601;
5391 		else
5392 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5393 		break;
5394 
5395 	case DRM_COLOR_YCBCR_BT709:
5396 		if (full_range)
5397 			*color_space = COLOR_SPACE_YCBCR709;
5398 		else
5399 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5400 		break;
5401 
5402 	case DRM_COLOR_YCBCR_BT2020:
5403 		if (full_range)
5404 			*color_space = COLOR_SPACE_2020_YCBCR;
5405 		else
5406 			return -EINVAL;
5407 		break;
5408 
5409 	default:
5410 		return -EINVAL;
5411 	}
5412 
5413 	return 0;
5414 }
5415 
5416 static int
5417 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5418 			    const struct drm_plane_state *plane_state,
5419 			    const uint64_t tiling_flags,
5420 			    struct dc_plane_info *plane_info,
5421 			    struct dc_plane_address *address,
5422 			    bool tmz_surface,
5423 			    bool force_disable_dcc)
5424 {
5425 	const struct drm_framebuffer *fb = plane_state->fb;
5426 	const struct amdgpu_framebuffer *afb =
5427 		to_amdgpu_framebuffer(plane_state->fb);
5428 	int ret;
5429 
5430 	memset(plane_info, 0, sizeof(*plane_info));
5431 
5432 	switch (fb->format->format) {
5433 	case DRM_FORMAT_C8:
5434 		plane_info->format =
5435 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5436 		break;
5437 	case DRM_FORMAT_RGB565:
5438 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5439 		break;
5440 	case DRM_FORMAT_XRGB8888:
5441 	case DRM_FORMAT_ARGB8888:
5442 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5443 		break;
5444 	case DRM_FORMAT_XRGB2101010:
5445 	case DRM_FORMAT_ARGB2101010:
5446 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5447 		break;
5448 	case DRM_FORMAT_XBGR2101010:
5449 	case DRM_FORMAT_ABGR2101010:
5450 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5451 		break;
5452 	case DRM_FORMAT_XBGR8888:
5453 	case DRM_FORMAT_ABGR8888:
5454 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5455 		break;
5456 	case DRM_FORMAT_NV21:
5457 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5458 		break;
5459 	case DRM_FORMAT_NV12:
5460 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5461 		break;
5462 	case DRM_FORMAT_P010:
5463 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5464 		break;
5465 	case DRM_FORMAT_XRGB16161616F:
5466 	case DRM_FORMAT_ARGB16161616F:
5467 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5468 		break;
5469 	case DRM_FORMAT_XBGR16161616F:
5470 	case DRM_FORMAT_ABGR16161616F:
5471 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5472 		break;
5473 	case DRM_FORMAT_XRGB16161616:
5474 	case DRM_FORMAT_ARGB16161616:
5475 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5476 		break;
5477 	case DRM_FORMAT_XBGR16161616:
5478 	case DRM_FORMAT_ABGR16161616:
5479 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5480 		break;
5481 	default:
5482 		DRM_ERROR(
5483 			"Unsupported screen format %p4cc\n",
5484 			&fb->format->format);
5485 		return -EINVAL;
5486 	}
5487 
5488 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5489 	case DRM_MODE_ROTATE_0:
5490 		plane_info->rotation = ROTATION_ANGLE_0;
5491 		break;
5492 	case DRM_MODE_ROTATE_90:
5493 		plane_info->rotation = ROTATION_ANGLE_90;
5494 		break;
5495 	case DRM_MODE_ROTATE_180:
5496 		plane_info->rotation = ROTATION_ANGLE_180;
5497 		break;
5498 	case DRM_MODE_ROTATE_270:
5499 		plane_info->rotation = ROTATION_ANGLE_270;
5500 		break;
5501 	default:
5502 		plane_info->rotation = ROTATION_ANGLE_0;
5503 		break;
5504 	}
5505 
5506 	plane_info->visible = true;
5507 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5508 
5509 	plane_info->layer_index = 0;
5510 
5511 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5512 					  &plane_info->color_space);
5513 	if (ret)
5514 		return ret;
5515 
5516 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5517 					   plane_info->rotation, tiling_flags,
5518 					   &plane_info->tiling_info,
5519 					   &plane_info->plane_size,
5520 					   &plane_info->dcc, address, tmz_surface,
5521 					   force_disable_dcc);
5522 	if (ret)
5523 		return ret;
5524 
5525 	fill_blending_from_plane_state(
5526 		plane_state, &plane_info->per_pixel_alpha,
5527 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5528 
5529 	return 0;
5530 }
5531 
5532 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5533 				    struct dc_plane_state *dc_plane_state,
5534 				    struct drm_plane_state *plane_state,
5535 				    struct drm_crtc_state *crtc_state)
5536 {
5537 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5538 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5539 	struct dc_scaling_info scaling_info;
5540 	struct dc_plane_info plane_info;
5541 	int ret;
5542 	bool force_disable_dcc = false;
5543 
5544 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5545 	if (ret)
5546 		return ret;
5547 
5548 	dc_plane_state->src_rect = scaling_info.src_rect;
5549 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5550 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5551 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5552 
5553 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5554 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5555 					  afb->tiling_flags,
5556 					  &plane_info,
5557 					  &dc_plane_state->address,
5558 					  afb->tmz_surface,
5559 					  force_disable_dcc);
5560 	if (ret)
5561 		return ret;
5562 
5563 	dc_plane_state->format = plane_info.format;
5564 	dc_plane_state->color_space = plane_info.color_space;
5566 	dc_plane_state->plane_size = plane_info.plane_size;
5567 	dc_plane_state->rotation = plane_info.rotation;
5568 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5569 	dc_plane_state->stereo_format = plane_info.stereo_format;
5570 	dc_plane_state->tiling_info = plane_info.tiling_info;
5571 	dc_plane_state->visible = plane_info.visible;
5572 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5573 	dc_plane_state->global_alpha = plane_info.global_alpha;
5574 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5575 	dc_plane_state->dcc = plane_info.dcc;
5576 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5577 	dc_plane_state->flip_int_enabled = true;
5578 
5579 	/*
5580 	 * Always set input transfer function, since plane state is refreshed
5581 	 * every time.
5582 	 */
5583 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5584 	if (ret)
5585 		return ret;
5586 
5587 	return 0;
5588 }
5589 
5590 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5591 					   const struct dm_connector_state *dm_state,
5592 					   struct dc_stream_state *stream)
5593 {
5594 	enum amdgpu_rmx_type rmx_type;
5595 
5596 	struct rect src = { 0 }; /* viewport in composition space */
5597 	struct rect dst = { 0 }; /* stream addressable area */
5598 
5599 	/* no mode. nothing to be done */
5600 	if (!mode)
5601 		return;
5602 
5603 	/* Full screen scaling by default */
5604 	src.width = mode->hdisplay;
5605 	src.height = mode->vdisplay;
5606 	dst.width = stream->timing.h_addressable;
5607 	dst.height = stream->timing.v_addressable;
5608 
5609 	if (dm_state) {
5610 		rmx_type = dm_state->scaling;
5611 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5612 			if (src.width * dst.height <
5613 					src.height * dst.width) {
5614 				/* height needs less upscaling/more downscaling */
5615 				dst.width = src.width *
5616 						dst.height / src.height;
5617 			} else {
5618 				/* width needs less upscaling/more downscaling */
5619 				dst.height = src.height *
5620 						dst.width / src.width;
5621 			}
5622 		} else if (rmx_type == RMX_CENTER) {
5623 			dst = src;
5624 		}
5625 
5626 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5627 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5628 
5629 		if (dm_state->underscan_enable) {
5630 			dst.x += dm_state->underscan_hborder / 2;
5631 			dst.y += dm_state->underscan_vborder / 2;
5632 			dst.width -= dm_state->underscan_hborder;
5633 			dst.height -= dm_state->underscan_vborder;
5634 		}
5635 	}
5636 
5637 	stream->src = src;
5638 	stream->dst = dst;
5639 
5640 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5641 		      dst.x, dst.y, dst.width, dst.height);
5642 
5643 }
5644 
5645 static enum dc_color_depth
5646 convert_color_depth_from_display_info(const struct drm_connector *connector,
5647 				      bool is_y420, int requested_bpc)
5648 {
5649 	uint8_t bpc;
5650 
5651 	if (is_y420) {
5652 		bpc = 8;
5653 
5654 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5655 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5656 			bpc = 16;
5657 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5658 			bpc = 12;
5659 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5660 			bpc = 10;
5661 	} else {
5662 		bpc = (uint8_t)connector->display_info.bpc;
5663 		/* Assume 8 bpc by default if no bpc is specified. */
5664 		bpc = bpc ? bpc : 8;
5665 	}
5666 
5667 	if (requested_bpc > 0) {
5668 		/*
5669 		 * Cap display bpc based on the user requested value.
5670 		 *
5671 		 * The value for state->max_bpc may not be correctly updated
5672 		 * depending on when the connector gets added to the state
5673 		 * or if this was called outside of atomic check, so it
5674 		 * can't be used directly.
5675 		 */
5676 		bpc = min_t(u8, bpc, requested_bpc);
5677 
5678 		/* Round down to the nearest even number. */
5679 		bpc = bpc - (bpc & 1);
5680 	}
5681 
5682 	switch (bpc) {
5683 	case 0:
5684 		/*
5685 		 * Temporary workaround: DRM doesn't parse color depth for
5686 		 * EDID revisions before 1.4.
5687 		 * TODO: Fix EDID parsing.
5688 		 */
5689 		return COLOR_DEPTH_888;
5690 	case 6:
5691 		return COLOR_DEPTH_666;
5692 	case 8:
5693 		return COLOR_DEPTH_888;
5694 	case 10:
5695 		return COLOR_DEPTH_101010;
5696 	case 12:
5697 		return COLOR_DEPTH_121212;
5698 	case 14:
5699 		return COLOR_DEPTH_141414;
5700 	case 16:
5701 		return COLOR_DEPTH_161616;
5702 	default:
5703 		return COLOR_DEPTH_UNDEFINED;
5704 	}
5705 }
5706 
5707 static enum dc_aspect_ratio
5708 get_aspect_ratio(const struct drm_display_mode *mode_in)
5709 {
5710 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5711 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5712 }
5713 
5714 static enum dc_color_space
5715 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5716 {
5717 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5718 
5719 	switch (dc_crtc_timing->pixel_encoding)	{
5720 	case PIXEL_ENCODING_YCBCR422:
5721 	case PIXEL_ENCODING_YCBCR444:
5722 	case PIXEL_ENCODING_YCBCR420:
5723 	{
5724 		/*
5725 		 * 27030 kHz is the separation point between HDTV and SDTV
5726 		 * according to the HDMI spec; use YCbCr709 above it and
5727 		 * YCbCr601 below it.
5728 		 */
5729 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5730 			if (dc_crtc_timing->flags.Y_ONLY)
5731 				color_space =
5732 					COLOR_SPACE_YCBCR709_LIMITED;
5733 			else
5734 				color_space = COLOR_SPACE_YCBCR709;
5735 		} else {
5736 			if (dc_crtc_timing->flags.Y_ONLY)
5737 				color_space =
5738 					COLOR_SPACE_YCBCR601_LIMITED;
5739 			else
5740 				color_space = COLOR_SPACE_YCBCR601;
5741 		}
5742 
5743 	}
5744 	break;
5745 	case PIXEL_ENCODING_RGB:
5746 		color_space = COLOR_SPACE_SRGB;
5747 		break;
5748 
5749 	default:
5750 		WARN_ON(1);
5751 		break;
5752 	}
5753 
5754 	return color_space;
5755 }
5756 
5757 static bool adjust_colour_depth_from_display_info(
5758 	struct dc_crtc_timing *timing_out,
5759 	const struct drm_display_info *info)
5760 {
5761 	enum dc_color_depth depth = timing_out->display_color_depth;
5762 	int normalized_clk;
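	/*
	 * Walk down from the current depth until the depth-scaled pixel clock
	 * fits within the sink's maximum TMDS clock.
	 */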
5763 	do {
5764 		normalized_clk = timing_out->pix_clk_100hz / 10;
5765 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5766 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5767 			normalized_clk /= 2;
5768 		/* Adjust the pixel clock per the HDMI spec based on colour depth */
5769 		switch (depth) {
5770 		case COLOR_DEPTH_888:
5771 			break;
5772 		case COLOR_DEPTH_101010:
5773 			normalized_clk = (normalized_clk * 30) / 24;
5774 			break;
5775 		case COLOR_DEPTH_121212:
5776 			normalized_clk = (normalized_clk * 36) / 24;
5777 			break;
5778 		case COLOR_DEPTH_161616:
5779 			normalized_clk = (normalized_clk * 48) / 24;
5780 			break;
5781 		default:
5782 			/* The above depths are the only ones valid for HDMI. */
5783 			return false;
5784 		}
5785 		if (normalized_clk <= info->max_tmds_clock) {
5786 			timing_out->display_color_depth = depth;
5787 			return true;
5788 		}
5789 	} while (--depth > COLOR_DEPTH_666);
5790 	return false;
5791 }
5792 
5793 static void fill_stream_properties_from_drm_display_mode(
5794 	struct dc_stream_state *stream,
5795 	const struct drm_display_mode *mode_in,
5796 	const struct drm_connector *connector,
5797 	const struct drm_connector_state *connector_state,
5798 	const struct dc_stream_state *old_stream,
5799 	int requested_bpc)
5800 {
5801 	struct dc_crtc_timing *timing_out = &stream->timing;
5802 	const struct drm_display_info *info = &connector->display_info;
5803 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5804 	struct hdmi_vendor_infoframe hv_frame;
5805 	struct hdmi_avi_infoframe avi_frame;
5806 
5807 	memset(&hv_frame, 0, sizeof(hv_frame));
5808 	memset(&avi_frame, 0, sizeof(avi_frame));
5809 
5810 	timing_out->h_border_left = 0;
5811 	timing_out->h_border_right = 0;
5812 	timing_out->v_border_top = 0;
5813 	timing_out->v_border_bottom = 0;
5814 	/* TODO: un-hardcode */
5815 	if (drm_mode_is_420_only(info, mode_in)
5816 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5817 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5818 	else if (drm_mode_is_420_also(info, mode_in)
5819 			&& aconnector->force_yuv420_output)
5820 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5821 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5822 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5823 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5824 	else
5825 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5826 
5827 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5828 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5829 		connector,
5830 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5831 		requested_bpc);
5832 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5833 	timing_out->hdmi_vic = 0;
5834 
5835 	if (old_stream) {
5836 		timing_out->vic = old_stream->timing.vic;
5837 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5838 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5839 	} else {
5840 		timing_out->vic = drm_match_cea_mode(mode_in);
5841 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5842 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5843 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5844 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5845 	}
5846 
5847 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5848 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5849 		timing_out->vic = avi_frame.video_code;
5850 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5851 		timing_out->hdmi_vic = hv_frame.vic;
5852 	}
5853 
5854 	if (is_freesync_video_mode(mode_in, aconnector)) {
5855 		timing_out->h_addressable = mode_in->hdisplay;
5856 		timing_out->h_total = mode_in->htotal;
5857 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5858 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5859 		timing_out->v_total = mode_in->vtotal;
5860 		timing_out->v_addressable = mode_in->vdisplay;
5861 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5862 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5863 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5864 	} else {
5865 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5866 		timing_out->h_total = mode_in->crtc_htotal;
5867 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5868 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5869 		timing_out->v_total = mode_in->crtc_vtotal;
5870 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5871 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5872 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5873 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5874 	}
5875 
5876 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5877 
5878 	stream->output_color_space = get_output_color_space(timing_out);
5879 
5880 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5881 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5882 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5883 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5884 		    drm_mode_is_420_also(info, mode_in) &&
5885 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5886 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5887 			adjust_colour_depth_from_display_info(timing_out, info);
5888 		}
5889 	}
5890 }
5891 
5892 static void fill_audio_info(struct audio_info *audio_info,
5893 			    const struct drm_connector *drm_connector,
5894 			    const struct dc_sink *dc_sink)
5895 {
5896 	int i = 0;
5897 	int cea_revision = 0;
5898 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5899 
5900 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5901 	audio_info->product_id = edid_caps->product_id;
5902 
5903 	cea_revision = drm_connector->display_info.cea_rev;
5904 
5905 	strscpy(audio_info->display_name,
5906 		edid_caps->display_name,
5907 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5908 
5909 	if (cea_revision >= 3) {
5910 		audio_info->mode_count = edid_caps->audio_mode_count;
5911 
5912 		for (i = 0; i < audio_info->mode_count; ++i) {
5913 			audio_info->modes[i].format_code =
5914 					(enum audio_format_code)
5915 					(edid_caps->audio_modes[i].format_code);
5916 			audio_info->modes[i].channel_count =
5917 					edid_caps->audio_modes[i].channel_count;
5918 			audio_info->modes[i].sample_rates.all =
5919 					edid_caps->audio_modes[i].sample_rate;
5920 			audio_info->modes[i].sample_size =
5921 					edid_caps->audio_modes[i].sample_size;
5922 		}
5923 	}
5924 
5925 	audio_info->flags.all = edid_caps->speaker_flags;
5926 
5927 	/* TODO: We only check the progressive mode; check the interlaced mode too */
5928 	if (drm_connector->latency_present[0]) {
5929 		audio_info->video_latency = drm_connector->video_latency[0];
5930 		audio_info->audio_latency = drm_connector->audio_latency[0];
5931 	}
5932 
5933 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5934 
5935 }
5936 
5937 static void
5938 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5939 				      struct drm_display_mode *dst_mode)
5940 {
5941 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5942 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5943 	dst_mode->crtc_clock = src_mode->crtc_clock;
5944 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5945 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5946 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5947 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5948 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5949 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5950 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5951 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5952 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5953 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5954 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5955 }
5956 
5957 static void
5958 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5959 					const struct drm_display_mode *native_mode,
5960 					bool scale_enabled)
5961 {
5962 	if (scale_enabled) {
5963 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5964 	} else if (native_mode->clock == drm_mode->clock &&
5965 			native_mode->htotal == drm_mode->htotal &&
5966 			native_mode->vtotal == drm_mode->vtotal) {
5967 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5968 	} else {
5969 		/* no scaling nor amdgpu inserted, no need to patch */
5970 	}
5971 }
5972 
5973 static struct dc_sink *
5974 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5975 {
5976 	struct dc_sink_init_data sink_init_data = { 0 };
5977 	struct dc_sink *sink = NULL;

5978 	sink_init_data.link = aconnector->dc_link;
5979 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5980 
5981 	sink = dc_sink_create(&sink_init_data);
5982 	if (!sink) {
5983 		DRM_ERROR("Failed to create sink!\n");
5984 		return NULL;
5985 	}
5986 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5987 
5988 	return sink;
5989 }
5990 
5991 static void set_multisync_trigger_params(
5992 		struct dc_stream_state *stream)
5993 {
5994 	struct dc_stream_state *master = NULL;
5995 
5996 	if (stream->triggered_crtc_reset.enabled) {
5997 		master = stream->triggered_crtc_reset.event_source;
5998 		stream->triggered_crtc_reset.event =
5999 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6000 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6001 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6002 	}
6003 }
6004 
6005 static void set_master_stream(struct dc_stream_state *stream_set[],
6006 			      int stream_count)
6007 {
6008 	int j, highest_rfr = 0, master_stream = 0;
6009 
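	/*
	 * Pick the enabled stream with the highest refresh rate as the
	 * multisync master; every stream's CRTC reset is then sourced from it.
	 */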
6010 	for (j = 0;  j < stream_count; j++) {
6011 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6012 			int refresh_rate = 0;
6013 
6014 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6015 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6016 			if (refresh_rate > highest_rfr) {
6017 				highest_rfr = refresh_rate;
6018 				master_stream = j;
6019 			}
6020 		}
6021 	}
6022 	for (j = 0;  j < stream_count; j++) {
6023 		if (stream_set[j])
6024 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6025 	}
6026 }
6027 
6028 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6029 {
6030 	int i = 0;
6031 	struct dc_stream_state *stream;
6032 
6033 	if (context->stream_count < 2)
6034 		return;
6035 	for (i = 0; i < context->stream_count ; i++) {
6036 		if (!context->streams[i])
6037 			continue;
6038 		/*
6039 		 * TODO: add a function to read AMD VSDB bits and set
6040 		 * crtc_sync_master.multi_sync_enabled flag
6041 		 * For now it's set to false
6042 		 */
6043 	}
6044 
6045 	set_master_stream(context->streams, context->stream_count);
6046 
6047 	for (i = 0; i < context->stream_count ; i++) {
6048 		stream = context->streams[i];
6049 
6050 		if (!stream)
6051 			continue;
6052 
6053 		set_multisync_trigger_params(stream);
6054 	}
6055 }
6056 
6057 #if defined(CONFIG_DRM_AMD_DC_DCN)
6058 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6059 							struct dc_sink *sink, struct dc_stream_state *stream,
6060 							struct dsc_dec_dpcd_caps *dsc_caps)
6061 {
6062 	stream->timing.flags.DSC = 0;
6063 
6064 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6065 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6066 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6067 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6068 				      dsc_caps);
6069 	}
6070 }
6071 
6072 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6073 										struct dc_sink *sink, struct dc_stream_state *stream,
6074 										struct dsc_dec_dpcd_caps *dsc_caps)
6075 {
6076 	struct drm_connector *drm_connector = &aconnector->base;
6077 	uint32_t link_bandwidth_kbps;
6078 	uint32_t max_dsc_target_bpp_limit_override = 0;
6079 
6080 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6081 							dc_link_get_link_cap(aconnector->dc_link));
6082 
6083 	if (stream->link && stream->link->local_sink)
6084 		max_dsc_target_bpp_limit_override =
6085 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6086 
6087 	/* Set DSC policy according to dsc_clock_en */
6088 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6089 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6090 
6091 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6092 
6093 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6094 						dsc_caps,
6095 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6096 						max_dsc_target_bpp_limit_override,
6097 						link_bandwidth_kbps,
6098 						&stream->timing,
6099 						&stream->timing.dsc_cfg)) {
6100 			stream->timing.flags.DSC = 1;
6101 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6102 		}
6103 	}
6104 
6105 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6106 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6107 		stream->timing.flags.DSC = 1;
6108 
6109 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6110 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6111 
6112 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6113 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6114 
6115 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6116 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6117 }
6118 #endif /* CONFIG_DRM_AMD_DC_DCN */
6119 
6120 /**
6121  * DOC: FreeSync Video
6122  *
6123  * When a userspace application wants to play a video, the content follows a
6124  * standard format definition that usually specifies the FPS for that format.
6125  * The list below illustrates some video formats and their expected FPS:
6127  *
6128  * - TV/NTSC (23.976 FPS)
6129  * - Cinema (24 FPS)
6130  * - TV/PAL (25 FPS)
6131  * - TV/NTSC (29.97 FPS)
6132  * - TV/NTSC (30 FPS)
6133  * - Cinema HFR (48 FPS)
6134  * - TV/PAL (50 FPS)
6135  * - Commonly used (60 FPS)
6136  * - Multiples of 24 (48,72,96,120 FPS)
6137  *
6138  * The list of standard video formats is not huge and can be added to the
6139  * connector's modeset list beforehand. With that, userspace can leverage
6140  * FreeSync to extend the front porch in order to attain the target refresh
6141  * rate. Such a switch will happen seamlessly, without screen blanking or
6142  * reprogramming of the output in any other way. If userspace requests a
6143  * modesetting change compatible with FreeSync modes that only differ in the
6144  * refresh rate, DC will skip the full update and avoid blinking during the
6145  * transition. For example, the video player can change the modesetting from
6146  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6147  * causing any display blink. This same concept can be applied to a mode
6148  * setting change.
6149  */
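/*
 * As a rough illustration (not tied to any particular panel): refresh rate is
 * pix_clk / (htotal * vtotal), so presenting 48 FPS content on a 60 Hz base
 * mode only needs the vertical front porch stretched until vtotal grows by
 * about 60/48, leaving the pixel clock and horizontal timing untouched.
 */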
6150 static struct drm_display_mode *
6151 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6152 			  bool use_probed_modes)
6153 {
6154 	struct drm_display_mode *m, *m_pref = NULL;
6155 	u16 current_refresh, highest_refresh;
6156 	struct list_head *list_head = use_probed_modes ?
6157 						    &aconnector->base.probed_modes :
6158 						    &aconnector->base.modes;
6159 
6160 	if (aconnector->freesync_vid_base.clock != 0)
6161 		return &aconnector->freesync_vid_base;
6162 
6163 	/* Find the preferred mode */
6164 	list_for_each_entry(m, list_head, head) {
6165 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6166 			m_pref = m;
6167 			break;
6168 		}
6169 	}
6170 
6171 	if (!m_pref) {
6172 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6173 		m_pref = list_first_entry_or_null(
6174 			&aconnector->base.modes, struct drm_display_mode, head);
6175 		if (!m_pref) {
6176 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6177 			return NULL;
6178 		}
6179 	}
6180 
6181 	highest_refresh = drm_mode_vrefresh(m_pref);
6182 
6183 	/*
6184 	 * Find the mode with highest refresh rate with same resolution.
6185 	 * For some monitors, preferred mode is not the mode with highest
6186 	 * supported refresh rate.
6187 	 */
6188 	list_for_each_entry(m, list_head, head) {
6189 		current_refresh  = drm_mode_vrefresh(m);
6190 
6191 		if (m->hdisplay == m_pref->hdisplay &&
6192 		    m->vdisplay == m_pref->vdisplay &&
6193 		    highest_refresh < current_refresh) {
6194 			highest_refresh = current_refresh;
6195 			m_pref = m;
6196 		}
6197 	}
6198 
6199 	aconnector->freesync_vid_base = *m_pref;
6200 	return m_pref;
6201 }
6202 
6203 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6204 				   struct amdgpu_dm_connector *aconnector)
6205 {
6206 	struct drm_display_mode *high_mode;
6207 	int timing_diff;
6208 
6209 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6210 	if (!high_mode || !mode)
6211 		return false;
6212 
6213 	timing_diff = high_mode->vtotal - mode->vtotal;
6214 
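	/*
	 * A FreeSync video mode differs from the base mode only in the
	 * vertical front porch, so vsync_start/vsync_end must shift by exactly
	 * the same amount as vtotal.
	 */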
6215 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6216 	    high_mode->hdisplay != mode->hdisplay ||
6217 	    high_mode->vdisplay != mode->vdisplay ||
6218 	    high_mode->hsync_start != mode->hsync_start ||
6219 	    high_mode->hsync_end != mode->hsync_end ||
6220 	    high_mode->htotal != mode->htotal ||
6221 	    high_mode->hskew != mode->hskew ||
6222 	    high_mode->vscan != mode->vscan ||
6223 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6224 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6225 		return false;
6226 	else
6227 		return true;
6228 }
6229 
6230 static struct dc_stream_state *
6231 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6232 		       const struct drm_display_mode *drm_mode,
6233 		       const struct dm_connector_state *dm_state,
6234 		       const struct dc_stream_state *old_stream,
6235 		       int requested_bpc)
6236 {
6237 	struct drm_display_mode *preferred_mode = NULL;
6238 	struct drm_connector *drm_connector;
6239 	const struct drm_connector_state *con_state =
6240 		dm_state ? &dm_state->base : NULL;
6241 	struct dc_stream_state *stream = NULL;
6242 	struct drm_display_mode mode = *drm_mode;
6243 	struct drm_display_mode saved_mode;
6244 	struct drm_display_mode *freesync_mode = NULL;
6245 	bool native_mode_found = false;
6246 	bool recalculate_timing = false;
6247 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6248 	int mode_refresh;
6249 	int preferred_refresh = 0;
6250 #if defined(CONFIG_DRM_AMD_DC_DCN)
6251 	struct dsc_dec_dpcd_caps dsc_caps;
6252 #endif
6253 	struct dc_sink *sink = NULL;
6254 
6255 	memset(&saved_mode, 0, sizeof(saved_mode));
6256 
6257 	if (aconnector == NULL) {
6258 		DRM_ERROR("aconnector is NULL!\n");
6259 		return stream;
6260 	}
6261 
6262 	drm_connector = &aconnector->base;
6263 
6264 	if (!aconnector->dc_sink) {
6265 		sink = create_fake_sink(aconnector);
6266 		if (!sink)
6267 			return stream;
6268 	} else {
6269 		sink = aconnector->dc_sink;
6270 		dc_sink_retain(sink);
6271 	}
6272 
6273 	stream = dc_create_stream_for_sink(sink);
6274 
6275 	if (stream == NULL) {
6276 		DRM_ERROR("Failed to create stream for sink!\n");
6277 		goto finish;
6278 	}
6279 
6280 	stream->dm_stream_context = aconnector;
6281 
6282 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6283 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6284 
6285 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6286 		/* Search for preferred mode */
6287 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6288 			native_mode_found = true;
6289 			break;
6290 		}
6291 	}
6292 	if (!native_mode_found)
6293 		preferred_mode = list_first_entry_or_null(
6294 				&aconnector->base.modes,
6295 				struct drm_display_mode,
6296 				head);
6297 
6298 	mode_refresh = drm_mode_vrefresh(&mode);
6299 
6300 	if (preferred_mode == NULL) {
6301 		/*
6302 		 * This may not be an error: the use case is when there are no
6303 		 * usermode calls to reset and set the mode upon hotplug. In that
6304 		 * case, we call set mode ourselves to restore the previous mode,
6305 		 * and the mode list may not have been filled in yet.
6306 		 */
6307 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6308 	} else {
6309 		recalculate_timing = amdgpu_freesync_vid_mode &&
6310 				 is_freesync_video_mode(&mode, aconnector);
6311 		if (recalculate_timing) {
6312 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6313 			saved_mode = mode;
6314 			mode = *freesync_mode;
6315 		} else {
6316 			decide_crtc_timing_for_drm_display_mode(
6317 				&mode, preferred_mode, scale);
6318 
6319 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6320 		}
6321 	}
6322 
6323 	if (recalculate_timing)
6324 		drm_mode_set_crtcinfo(&saved_mode, 0);
6325 	else if (!dm_state)
6326 		drm_mode_set_crtcinfo(&mode, 0);
6327 
6328 	/*
6329 	 * If scaling is enabled and the refresh rate didn't change,
6330 	 * copy the vic and polarities of the old timings.
6331 	 */
6332 	if (!scale || mode_refresh != preferred_refresh)
6333 		fill_stream_properties_from_drm_display_mode(
6334 			stream, &mode, &aconnector->base, con_state, NULL,
6335 			requested_bpc);
6336 	else
6337 		fill_stream_properties_from_drm_display_mode(
6338 			stream, &mode, &aconnector->base, con_state, old_stream,
6339 			requested_bpc);
6340 
6341 #if defined(CONFIG_DRM_AMD_DC_DCN)
6342 	/* SST DSC determination policy */
6343 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6344 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6345 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6346 #endif
6347 
6348 	update_stream_scaling_settings(&mode, dm_state, stream);
6349 
6350 	fill_audio_info(
6351 		&stream->audio_info,
6352 		drm_connector,
6353 		sink);
6354 
6355 	update_stream_signal(stream, sink);
6356 
6357 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6358 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6359 
6360 	if (stream->link->psr_settings.psr_feature_enabled) {
6361 		/*
6362 		 * Decide whether the stream supports VSC SDP colorimetry
6363 		 * before building the VSC info packet.
6364 		 */
6365 		stream->use_vsc_sdp_for_colorimetry = false;
6366 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6367 			stream->use_vsc_sdp_for_colorimetry =
6368 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6369 		} else {
6370 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6371 				stream->use_vsc_sdp_for_colorimetry = true;
6372 		}
6373 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6374 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6375 
6376 	}
6377 finish:
6378 	dc_sink_release(sink);
6379 
6380 	return stream;
6381 }
6382 
6383 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6384 {
6385 	drm_crtc_cleanup(crtc);
6386 	kfree(crtc);
6387 }
6388 
6389 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6390 				  struct drm_crtc_state *state)
6391 {
6392 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6393 
6394 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6395 	if (cur->stream)
6396 		dc_stream_release(cur->stream);
6397 
6399 	__drm_atomic_helper_crtc_destroy_state(state);
6400 
6402 	kfree(state);
6403 }
6404 
6405 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6406 {
6407 	struct dm_crtc_state *state;
6408 
6409 	if (crtc->state)
6410 		dm_crtc_destroy_state(crtc, crtc->state);
6411 
6412 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6413 	if (WARN_ON(!state))
6414 		return;
6415 
6416 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6417 }
6418 
6419 static struct drm_crtc_state *
6420 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6421 {
6422 	struct dm_crtc_state *state, *cur;
6423 
6424 	if (WARN_ON(!crtc->state))
6425 		return NULL;
6426 
6427 	cur = to_dm_crtc_state(crtc->state);
6428 
6429 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6430 	if (!state)
6431 		return NULL;
6432 
6433 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6434 
6435 	if (cur->stream) {
6436 		state->stream = cur->stream;
6437 		dc_stream_retain(state->stream);
6438 	}
6439 
6440 	state->active_planes = cur->active_planes;
6441 	state->vrr_infopacket = cur->vrr_infopacket;
6442 	state->abm_level = cur->abm_level;
6443 	state->vrr_supported = cur->vrr_supported;
6444 	state->freesync_config = cur->freesync_config;
6445 	state->cm_has_degamma = cur->cm_has_degamma;
6446 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6447 	state->force_dpms_off = cur->force_dpms_off;
6448 	/* TODO: Duplicate dc_stream once the stream object is flattened */
6449 
6450 	return &state->base;
6451 }
6452 
6453 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6454 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6455 {
6456 	crtc_debugfs_init(crtc);
6457 
6458 	return 0;
6459 }
6460 #endif
6461 
6462 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6463 {
6464 	enum dc_irq_source irq_source;
6465 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6466 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6467 	int rc;
6468 
6469 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6470 
6471 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6472 
6473 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6474 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6475 	return rc;
6476 }
6477 
6478 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6479 {
6480 	enum dc_irq_source irq_source;
6481 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6482 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6483 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6484 #if defined(CONFIG_DRM_AMD_DC_DCN)
6485 	struct amdgpu_display_manager *dm = &adev->dm;
6486 	struct vblank_control_work *work;
6487 #endif
6488 	int rc = 0;
6489 
6490 	if (enable) {
6491 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6492 		if (amdgpu_dm_vrr_active(acrtc_state))
6493 			rc = dm_set_vupdate_irq(crtc, true);
6494 	} else {
6495 		/* vblank irq off -> vupdate irq off */
6496 		rc = dm_set_vupdate_irq(crtc, false);
6497 	}
6498 
6499 	if (rc)
6500 		return rc;
6501 
6502 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6503 
6504 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6505 		return -EBUSY;
6506 
6507 	if (amdgpu_in_reset(adev))
6508 		return 0;
6509 
6510 #if defined(CONFIG_DRM_AMD_DC_DCN)
6511 	if (dm->vblank_control_workqueue) {
6512 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6513 		if (!work)
6514 			return -ENOMEM;
6515 
6516 		INIT_WORK(&work->work, vblank_control_worker);
6517 		work->dm = dm;
6518 		work->acrtc = acrtc;
6519 		work->enable = enable;
6520 
6521 		if (acrtc_state->stream) {
6522 			dc_stream_retain(acrtc_state->stream);
6523 			work->stream = acrtc_state->stream;
6524 		}
6525 
6526 		queue_work(dm->vblank_control_workqueue, &work->work);
6527 	}
6528 #endif
6529 
6530 	return 0;
6531 }
6532 
6533 static int dm_enable_vblank(struct drm_crtc *crtc)
6534 {
6535 	return dm_set_vblank(crtc, true);
6536 }
6537 
6538 static void dm_disable_vblank(struct drm_crtc *crtc)
6539 {
6540 	dm_set_vblank(crtc, false);
6541 }
6542 
6543 /* Only the options currently available to the driver are implemented */
6544 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6545 	.reset = dm_crtc_reset_state,
6546 	.destroy = amdgpu_dm_crtc_destroy,
6547 	.set_config = drm_atomic_helper_set_config,
6548 	.page_flip = drm_atomic_helper_page_flip,
6549 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6550 	.atomic_destroy_state = dm_crtc_destroy_state,
6551 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6552 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6553 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6554 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6555 	.enable_vblank = dm_enable_vblank,
6556 	.disable_vblank = dm_disable_vblank,
6557 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6558 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6559 	.late_register = amdgpu_dm_crtc_late_register,
6560 #endif
6561 };
6562 
6563 static enum drm_connector_status
6564 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6565 {
6566 	bool connected;
6567 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6568 
6569 	/*
6570 	 * Notes:
6571 	 * 1. This interface is NOT called in context of HPD irq.
6572 	 * 2. This interface *is called* in the context of a user-mode ioctl,
6573 	 * which makes it a bad place for *any* MST-related activity.
6574 	 */
6575 
6576 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6577 	    !aconnector->fake_enable)
6578 		connected = (aconnector->dc_sink != NULL);
6579 	else
6580 		connected = (aconnector->base.force == DRM_FORCE_ON);
6581 
6582 	update_subconnector_property(aconnector);
6583 
6584 	return (connected ? connector_status_connected :
6585 			connector_status_disconnected);
6586 }
6587 
6588 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6589 					    struct drm_connector_state *connector_state,
6590 					    struct drm_property *property,
6591 					    uint64_t val)
6592 {
6593 	struct drm_device *dev = connector->dev;
6594 	struct amdgpu_device *adev = drm_to_adev(dev);
6595 	struct dm_connector_state *dm_old_state =
6596 		to_dm_connector_state(connector->state);
6597 	struct dm_connector_state *dm_new_state =
6598 		to_dm_connector_state(connector_state);
6599 
6600 	int ret = -EINVAL;
6601 
6602 	if (property == dev->mode_config.scaling_mode_property) {
6603 		enum amdgpu_rmx_type rmx_type;
6604 
6605 		switch (val) {
6606 		case DRM_MODE_SCALE_CENTER:
6607 			rmx_type = RMX_CENTER;
6608 			break;
6609 		case DRM_MODE_SCALE_ASPECT:
6610 			rmx_type = RMX_ASPECT;
6611 			break;
6612 		case DRM_MODE_SCALE_FULLSCREEN:
6613 			rmx_type = RMX_FULL;
6614 			break;
6615 		case DRM_MODE_SCALE_NONE:
6616 		default:
6617 			rmx_type = RMX_OFF;
6618 			break;
6619 		}
6620 
6621 		if (dm_old_state->scaling == rmx_type)
6622 			return 0;
6623 
6624 		dm_new_state->scaling = rmx_type;
6625 		ret = 0;
6626 	} else if (property == adev->mode_info.underscan_hborder_property) {
6627 		dm_new_state->underscan_hborder = val;
6628 		ret = 0;
6629 	} else if (property == adev->mode_info.underscan_vborder_property) {
6630 		dm_new_state->underscan_vborder = val;
6631 		ret = 0;
6632 	} else if (property == adev->mode_info.underscan_property) {
6633 		dm_new_state->underscan_enable = val;
6634 		ret = 0;
6635 	} else if (property == adev->mode_info.abm_level_property) {
6636 		dm_new_state->abm_level = val;
6637 		ret = 0;
6638 	}
6639 
6640 	return ret;
6641 }
6642 
6643 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6644 					    const struct drm_connector_state *state,
6645 					    struct drm_property *property,
6646 					    uint64_t *val)
6647 {
6648 	struct drm_device *dev = connector->dev;
6649 	struct amdgpu_device *adev = drm_to_adev(dev);
6650 	struct dm_connector_state *dm_state =
6651 		to_dm_connector_state(state);
6652 	int ret = -EINVAL;
6653 
6654 	if (property == dev->mode_config.scaling_mode_property) {
6655 		switch (dm_state->scaling) {
6656 		case RMX_CENTER:
6657 			*val = DRM_MODE_SCALE_CENTER;
6658 			break;
6659 		case RMX_ASPECT:
6660 			*val = DRM_MODE_SCALE_ASPECT;
6661 			break;
6662 		case RMX_FULL:
6663 			*val = DRM_MODE_SCALE_FULLSCREEN;
6664 			break;
6665 		case RMX_OFF:
6666 		default:
6667 			*val = DRM_MODE_SCALE_NONE;
6668 			break;
6669 		}
6670 		ret = 0;
6671 	} else if (property == adev->mode_info.underscan_hborder_property) {
6672 		*val = dm_state->underscan_hborder;
6673 		ret = 0;
6674 	} else if (property == adev->mode_info.underscan_vborder_property) {
6675 		*val = dm_state->underscan_vborder;
6676 		ret = 0;
6677 	} else if (property == adev->mode_info.underscan_property) {
6678 		*val = dm_state->underscan_enable;
6679 		ret = 0;
6680 	} else if (property == adev->mode_info.abm_level_property) {
6681 		*val = dm_state->abm_level;
6682 		ret = 0;
6683 	}
6684 
6685 	return ret;
6686 }
6687 
6688 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6689 {
6690 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6691 
6692 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6693 }
6694 
6695 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6696 {
6697 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6698 	const struct dc_link *link = aconnector->dc_link;
6699 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6700 	struct amdgpu_display_manager *dm = &adev->dm;
6701 	int i;
6702 
6703 	/*
6704 	 * Call only if mst_mgr was initialized before, since it's not done
6705 	 * for all connector types.
6706 	 */
6707 	if (aconnector->mst_mgr.dev)
6708 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6709 
6710 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6711 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6712 	for (i = 0; i < dm->num_of_edps; i++) {
6713 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6714 			backlight_device_unregister(dm->backlight_dev[i]);
6715 			dm->backlight_dev[i] = NULL;
6716 		}
6717 	}
6718 #endif
6719 
6720 	if (aconnector->dc_em_sink)
6721 		dc_sink_release(aconnector->dc_em_sink);
6722 	aconnector->dc_em_sink = NULL;
6723 	if (aconnector->dc_sink)
6724 		dc_sink_release(aconnector->dc_sink);
6725 	aconnector->dc_sink = NULL;
6726 
6727 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6728 	drm_connector_unregister(connector);
6729 	drm_connector_cleanup(connector);
6730 	if (aconnector->i2c) {
6731 		i2c_del_adapter(&aconnector->i2c->base);
6732 		kfree(aconnector->i2c);
6733 	}
6734 	kfree(aconnector->dm_dp_aux.aux.name);
6735 
6736 	kfree(connector);
6737 }
6738 
6739 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6740 {
6741 	struct dm_connector_state *state =
6742 		to_dm_connector_state(connector->state);
6743 
6744 	if (connector->state)
6745 		__drm_atomic_helper_connector_destroy_state(connector->state);
6746 
6747 	kfree(state);
6748 
6749 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6750 
6751 	if (state) {
6752 		state->scaling = RMX_OFF;
6753 		state->underscan_enable = false;
6754 		state->underscan_hborder = 0;
6755 		state->underscan_vborder = 0;
6756 		state->base.max_requested_bpc = 8;
6757 		state->vcpi_slots = 0;
6758 		state->pbn = 0;
6759 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6760 			state->abm_level = amdgpu_dm_abm_level;
6761 
6762 		__drm_atomic_helper_connector_reset(connector, &state->base);
6763 	}
6764 }
6765 
6766 struct drm_connector_state *
6767 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6768 {
6769 	struct dm_connector_state *state =
6770 		to_dm_connector_state(connector->state);
6771 
6772 	struct dm_connector_state *new_state =
6773 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6774 
6775 	if (!new_state)
6776 		return NULL;
6777 
6778 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6779 
6780 	new_state->freesync_capable = state->freesync_capable;
6781 	new_state->abm_level = state->abm_level;
6782 	new_state->scaling = state->scaling;
6783 	new_state->underscan_enable = state->underscan_enable;
6784 	new_state->underscan_hborder = state->underscan_hborder;
6785 	new_state->underscan_vborder = state->underscan_vborder;
6786 	new_state->vcpi_slots = state->vcpi_slots;
6787 	new_state->pbn = state->pbn;
6788 	return &new_state->base;
6789 }
6790 
6791 static int
6792 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6793 {
6794 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6795 		to_amdgpu_dm_connector(connector);
6796 	int r;
6797 
6798 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6799 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6800 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6801 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6802 		if (r)
6803 			return r;
6804 	}
6805 
6806 #if defined(CONFIG_DEBUG_FS)
6807 	connector_debugfs_init(amdgpu_dm_connector);
6808 #endif
6809 
6810 	return 0;
6811 }
6812 
6813 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6814 	.reset = amdgpu_dm_connector_funcs_reset,
6815 	.detect = amdgpu_dm_connector_detect,
6816 	.fill_modes = drm_helper_probe_single_connector_modes,
6817 	.destroy = amdgpu_dm_connector_destroy,
6818 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6819 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6820 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6821 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6822 	.late_register = amdgpu_dm_connector_late_register,
6823 	.early_unregister = amdgpu_dm_connector_unregister
6824 };
6825 
6826 static int get_modes(struct drm_connector *connector)
6827 {
6828 	return amdgpu_dm_connector_get_modes(connector);
6829 }
6830 
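/*
 * Build an emulated DC sink from the EDID blob attached to the connector
 * (e.g. one supplied via drm.edid_firmware) so that a connector forced on
 * from userspace can be driven without a physically attached sink. If no
 * EDID blob is present, the connector is forced back off.
 */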
6831 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6832 {
6833 	struct dc_sink_init_data init_params = {
6834 			.link = aconnector->dc_link,
6835 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6836 	};
6837 	struct edid *edid;
6838 
6839 	if (!aconnector->base.edid_blob_ptr) {
6840 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6841 				aconnector->base.name);
6842 
6843 		aconnector->base.force = DRM_FORCE_OFF;
6844 		aconnector->base.override_edid = false;
6845 		return;
6846 	}
6847 
6848 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6849 
6850 	aconnector->edid = edid;
6851 
6852 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6853 		aconnector->dc_link,
6854 		(uint8_t *)edid,
6855 		(edid->extensions + 1) * EDID_LENGTH,
6856 		&init_params);
6857 
6858 	if (aconnector->base.force == DRM_FORCE_ON) {
6859 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6860 			aconnector->dc_link->local_sink :
6861 			aconnector->dc_em_sink;
6862 		dc_sink_retain(aconnector->dc_sink);
6863 	}
6864 }
6865 
6866 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6867 {
6868 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6869 
6870 	/*
6871 	 * In the case of a headless boot with force-on for a DP-managed connector,
6872 	 * these settings have to be != 0 to get an initial modeset.
6873 	 */
6874 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6875 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6876 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6877 	}
6878 
6879 
6880 	aconnector->base.override_edid = true;
6881 	create_eml_sink(aconnector);
6882 }
6883 
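/*
 * Create a stream for the given sink and run it through DC validation.
 * If validation fails, retry with progressively lower bpc (down to 6),
 * and as a last resort retry once more forcing YCbCr420 output.
 */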
6884 static struct dc_stream_state *
6885 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6886 				const struct drm_display_mode *drm_mode,
6887 				const struct dm_connector_state *dm_state,
6888 				const struct dc_stream_state *old_stream)
6889 {
6890 	struct drm_connector *connector = &aconnector->base;
6891 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6892 	struct dc_stream_state *stream;
6893 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6894 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6895 	enum dc_status dc_result = DC_OK;
6896 
6897 	do {
6898 		stream = create_stream_for_sink(aconnector, drm_mode,
6899 						dm_state, old_stream,
6900 						requested_bpc);
6901 		if (stream == NULL) {
6902 			DRM_ERROR("Failed to create stream for sink!\n");
6903 			break;
6904 		}
6905 
6906 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6907 
6908 		if (dc_result != DC_OK) {
6909 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6910 				      drm_mode->hdisplay,
6911 				      drm_mode->vdisplay,
6912 				      drm_mode->clock,
6913 				      dc_result,
6914 				      dc_status_to_str(dc_result));
6915 
6916 			dc_stream_release(stream);
6917 			stream = NULL;
6918 			requested_bpc -= 2; /* lower bpc to retry validation */
6919 		}
6920 
6921 	} while (stream == NULL && requested_bpc >= 6);
6922 
6923 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6924 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6925 
6926 		aconnector->force_yuv420_output = true;
6927 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6928 						dm_state, old_stream);
6929 		aconnector->force_yuv420_output = false;
6930 	}
6931 
6932 	return stream;
6933 }
6934 
6935 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6936 				   struct drm_display_mode *mode)
6937 {
6938 	int result = MODE_ERROR;
6939 	struct dc_sink *dc_sink;
6940 	/* TODO: Unhardcode stream count */
6941 	struct dc_stream_state *stream;
6942 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6943 
6944 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6945 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6946 		return result;
6947 
6948 	/*
6949 	 * Only run this the first time mode_valid is called, to initialize
6950 	 * EDID management.
6951 	 */
6952 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6953 		!aconnector->dc_em_sink)
6954 		handle_edid_mgmt(aconnector);
6955 
6956 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6957 
6958 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6959 				aconnector->base.force != DRM_FORCE_ON) {
6960 		DRM_ERROR("dc_sink is NULL!\n");
6961 		goto fail;
6962 	}
6963 
6964 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6965 	if (stream) {
6966 		dc_stream_release(stream);
6967 		result = MODE_OK;
6968 	}
6969 
6970 fail:
6971 	/* TODO: error handling */
6972 	return result;
6973 }
6974 
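/*
 * Pack the connector's hdr_output_metadata into an HDMI DRM (Dynamic Range
 * and Mastering) infoframe and convert it into a DC info packet, either as
 * an HDMI infoframe or as a DP/eDP SDP depending on the connector type.
 */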
6975 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6976 				struct dc_info_packet *out)
6977 {
6978 	struct hdmi_drm_infoframe frame;
6979 	unsigned char buf[30]; /* 26 + 4 */
6980 	ssize_t len;
6981 	int ret, i;
6982 
6983 	memset(out, 0, sizeof(*out));
6984 
6985 	if (!state->hdr_output_metadata)
6986 		return 0;
6987 
6988 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6989 	if (ret)
6990 		return ret;
6991 
6992 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6993 	if (len < 0)
6994 		return (int)len;
6995 
6996 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6997 	if (len != 30)
6998 		return -EINVAL;
6999 
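	/*
	 * At this point buf[] holds the packed infoframe. A sketch of the
	 * layout, assuming the usual HDMI infoframe packing, is:
	 *   buf[0..2]  type / version / length header
	 *   buf[3]     checksum
	 *   buf[4..29] the 26-byte static metadata payload
	 * which is why buf[3] and &buf[4] are used below.
	 */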
7000 	/* Prepare the infopacket for DC. */
7001 	switch (state->connector->connector_type) {
7002 	case DRM_MODE_CONNECTOR_HDMIA:
7003 		out->hb0 = 0x87; /* type */
7004 		out->hb1 = 0x01; /* version */
7005 		out->hb2 = 0x1A; /* length */
7006 		out->sb[0] = buf[3]; /* checksum */
7007 		i = 1;
7008 		break;
7009 
7010 	case DRM_MODE_CONNECTOR_DisplayPort:
7011 	case DRM_MODE_CONNECTOR_eDP:
7012 		out->hb0 = 0x00; /* sdp id, zero */
7013 		out->hb1 = 0x87; /* type */
7014 		out->hb2 = 0x1D; /* payload len - 1 */
7015 		out->hb3 = (0x13 << 2); /* sdp version */
7016 		out->sb[0] = 0x01; /* version */
7017 		out->sb[1] = 0x1A; /* length */
7018 		i = 2;
7019 		break;
7020 
7021 	default:
7022 		return -EINVAL;
7023 	}
7024 
7025 	memcpy(&out->sb[i], &buf[4], 26);
7026 	out->valid = true;
7027 
7028 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7029 		       sizeof(out->sb), false);
7030 
7031 	return 0;
7032 }
7033 
7034 static int
7035 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7036 				 struct drm_atomic_state *state)
7037 {
7038 	struct drm_connector_state *new_con_state =
7039 		drm_atomic_get_new_connector_state(state, conn);
7040 	struct drm_connector_state *old_con_state =
7041 		drm_atomic_get_old_connector_state(state, conn);
7042 	struct drm_crtc *crtc = new_con_state->crtc;
7043 	struct drm_crtc_state *new_crtc_state;
7044 	int ret;
7045 
7046 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7047 
7048 	if (!crtc)
7049 		return 0;
7050 
7051 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7052 		struct dc_info_packet hdr_infopacket;
7053 
7054 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7055 		if (ret)
7056 			return ret;
7057 
7058 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7059 		if (IS_ERR(new_crtc_state))
7060 			return PTR_ERR(new_crtc_state);
7061 
7062 		/*
7063 		 * DC considers the stream backends changed if the
7064 		 * static metadata changes. Forcing the modeset also
7065 		 * gives a simple way for userspace to switch from
7066 		 * 8bpc to 10bpc when setting the metadata to enter
7067 		 * or exit HDR.
7068 		 *
7069 		 * Changing the static metadata after it's been
7070 		 * set is permissible, however. So only force a
7071 		 * modeset if we're entering or exiting HDR.
7072 		 */
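		/*
		 * Illustrative cases: old == NULL && new != NULL (entering
		 * HDR) or old != NULL && new == NULL (leaving HDR) force a
		 * modeset below; old != NULL && new != NULL (tweaking the
		 * metadata while already in HDR) does not.
		 */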
7073 		new_crtc_state->mode_changed =
7074 			!old_con_state->hdr_output_metadata ||
7075 			!new_con_state->hdr_output_metadata;
7076 	}
7077 
7078 	return 0;
7079 }
7080 
7081 static const struct drm_connector_helper_funcs
7082 amdgpu_dm_connector_helper_funcs = {
7083 	/*
7084 	 * When hotplugging a second, larger display in FB console mode, the larger
7085 	 * resolution modes are filtered out by drm_mode_validate_size() and are
7086 	 * missing after the user starts lightdm. So the get_modes callback needs
7087 	 * to rebuild the modes list, not just return the modes count.
7088 	 */
7089 	.get_modes = get_modes,
7090 	.mode_valid = amdgpu_dm_connector_mode_valid,
7091 	.atomic_check = amdgpu_dm_connector_atomic_check,
7092 };
7093 
7094 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7095 {
7096 }
7097 
7098 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7099 {
7100 	struct drm_atomic_state *state = new_crtc_state->state;
7101 	struct drm_plane *plane;
7102 	int num_active = 0;
7103 
7104 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7105 		struct drm_plane_state *new_plane_state;
7106 
7107 		/* Cursor planes are "fake". */
7108 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7109 			continue;
7110 
7111 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7112 
7113 		if (!new_plane_state) {
7114 			/*
7115 			 * The plane is enabled on the CRTC and hasn't changed
7116 			 * state. This means that it previously passed
7117 			 * validation and is therefore enabled.
7118 			 */
7119 			num_active += 1;
7120 			continue;
7121 		}
7122 
7123 		/* We need a framebuffer to be considered enabled. */
7124 		num_active += (new_plane_state->fb != NULL);
7125 	}
7126 
7127 	return num_active;
7128 }
7129 
7130 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7131 					 struct drm_crtc_state *new_crtc_state)
7132 {
7133 	struct dm_crtc_state *dm_new_crtc_state =
7134 		to_dm_crtc_state(new_crtc_state);
7135 
7136 	dm_new_crtc_state->active_planes = 0;
7137 
7138 	if (!dm_new_crtc_state->stream)
7139 		return;
7140 
7141 	dm_new_crtc_state->active_planes =
7142 		count_crtc_active_planes(new_crtc_state);
7143 }
7144 
7145 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7146 				       struct drm_atomic_state *state)
7147 {
7148 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7149 									  crtc);
7150 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7151 	struct dc *dc = adev->dm.dc;
7152 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7153 	int ret = -EINVAL;
7154 
7155 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7156 
7157 	dm_update_crtc_active_planes(crtc, crtc_state);
7158 
7159 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7160 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7161 		return ret;
7162 	}
7163 
7164 	/*
7165 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7166 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7167 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7168 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7169 	 */
7170 	if (crtc_state->enable &&
7171 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7172 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7173 		return -EINVAL;
7174 	}
7175 
7176 	/* In some use cases, like reset, no stream is attached */
7177 	if (!dm_crtc_state->stream)
7178 		return 0;
7179 
7180 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7181 		return 0;
7182 
7183 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7184 	return ret;
7185 }
7186 
7187 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7188 				      const struct drm_display_mode *mode,
7189 				      struct drm_display_mode *adjusted_mode)
7190 {
7191 	return true;
7192 }
7193 
7194 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7195 	.disable = dm_crtc_helper_disable,
7196 	.atomic_check = dm_crtc_helper_atomic_check,
7197 	.mode_fixup = dm_crtc_helper_mode_fixup,
7198 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7199 };
7200 
7201 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7202 {
7203 
7204 }
7205 
7206 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7207 {
7208 	switch (display_color_depth) {
7209 	case COLOR_DEPTH_666:
7210 		return 6;
7211 	case COLOR_DEPTH_888:
7212 		return 8;
7213 	case COLOR_DEPTH_101010:
7214 		return 10;
7215 	case COLOR_DEPTH_121212:
7216 		return 12;
7217 	case COLOR_DEPTH_141414:
7218 		return 14;
7219 	case COLOR_DEPTH_161616:
7220 		return 16;
7221 	default:
7222 		break;
7223 	}
7224 	return 0;
7225 }
7226 
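/*
 * For an MST connector, derive the stream bandwidth (PBN) from the adjusted
 * mode clock and the colour depth negotiated from the connector's
 * max_requested_bpc, then reserve VCPI time slots for it in the MST topology
 * state. Non-MST connectors return early with no work to do.
 */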
7227 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7228 					  struct drm_crtc_state *crtc_state,
7229 					  struct drm_connector_state *conn_state)
7230 {
7231 	struct drm_atomic_state *state = crtc_state->state;
7232 	struct drm_connector *connector = conn_state->connector;
7233 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7234 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7235 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7236 	struct drm_dp_mst_topology_mgr *mst_mgr;
7237 	struct drm_dp_mst_port *mst_port;
7238 	enum dc_color_depth color_depth;
7239 	int clock, bpp = 0;
7240 	bool is_y420 = false;
7241 
7242 	if (!aconnector->port || !aconnector->dc_sink)
7243 		return 0;
7244 
7245 	mst_port = aconnector->port;
7246 	mst_mgr = &aconnector->mst_port->mst_mgr;
7247 
7248 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7249 		return 0;
7250 
7251 	if (!state->duplicated) {
7252 		int max_bpc = conn_state->max_requested_bpc;
7253 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7254 				aconnector->force_yuv420_output;
7255 		color_depth = convert_color_depth_from_display_info(connector,
7256 								    is_y420,
7257 								    max_bpc);
7258 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7259 		clock = adjusted_mode->clock;
7260 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7261 	}
7262 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7263 									   mst_mgr,
7264 									   mst_port,
7265 									   dm_new_connector_state->pbn,
7266 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7267 	if (dm_new_connector_state->vcpi_slots < 0) {
7268 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7269 		return dm_new_connector_state->vcpi_slots;
7270 	}
7271 	return 0;
7272 }
7273 
7274 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7275 	.disable = dm_encoder_helper_disable,
7276 	.atomic_check = dm_encoder_helper_atomic_check
7277 };
7278 
7279 #if defined(CONFIG_DRM_AMD_DC_DCN)
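/*
 * Propagate the per-stream PBN values computed by
 * compute_mst_dsc_configs_for_state() into the MST connector states,
 * enabling or disabling DSC on each MST port and recording the resulting
 * VCPI slot counts.
 */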
7280 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7281 					    struct dc_state *dc_state,
7282 					    struct dsc_mst_fairness_vars *vars)
7283 {
7284 	struct dc_stream_state *stream = NULL;
7285 	struct drm_connector *connector;
7286 	struct drm_connector_state *new_con_state;
7287 	struct amdgpu_dm_connector *aconnector;
7288 	struct dm_connector_state *dm_conn_state;
7289 	int i, j;
7290 	int vcpi, pbn_div, pbn, slot_num = 0;
7291 
7292 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7293 
7294 		aconnector = to_amdgpu_dm_connector(connector);
7295 
7296 		if (!aconnector->port)
7297 			continue;
7298 
7299 		if (!new_con_state || !new_con_state->crtc)
7300 			continue;
7301 
7302 		dm_conn_state = to_dm_connector_state(new_con_state);
7303 
7304 		for (j = 0; j < dc_state->stream_count; j++) {
7305 			stream = dc_state->streams[j];
7306 			if (!stream)
7307 				continue;
7308 
7309 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7310 				break;
7311 
7312 			stream = NULL;
7313 		}
7314 
7315 		if (!stream)
7316 			continue;
7317 
7318 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7319 		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7320 		for (j = 0; j < dc_state->stream_count; j++) {
7321 			if (vars[j].aconnector == aconnector) {
7322 				pbn = vars[j].pbn;
7323 				break;
7324 			}
7325 		}
7326 
7327 		if (j == dc_state->stream_count)
7328 			continue;
7329 
7330 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7331 
7332 		if (stream->timing.flags.DSC != 1) {
7333 			dm_conn_state->pbn = pbn;
7334 			dm_conn_state->vcpi_slots = slot_num;
7335 
7336 			drm_dp_mst_atomic_enable_dsc(state,
7337 						     aconnector->port,
7338 						     dm_conn_state->pbn,
7339 						     0,
7340 						     false);
7341 			continue;
7342 		}
7343 
7344 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7345 						    aconnector->port,
7346 						    pbn, pbn_div,
7347 						    true);
7348 		if (vcpi < 0)
7349 			return vcpi;
7350 
7351 		dm_conn_state->pbn = pbn;
7352 		dm_conn_state->vcpi_slots = vcpi;
7353 	}
7354 	return 0;
7355 }
7356 #endif
7357 
7358 static void dm_drm_plane_reset(struct drm_plane *plane)
7359 {
7360 	struct dm_plane_state *amdgpu_state = NULL;
7361 
7362 	if (plane->state)
7363 		plane->funcs->atomic_destroy_state(plane, plane->state);
7364 
7365 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7366 	WARN_ON(amdgpu_state == NULL);
7367 
7368 	if (amdgpu_state)
7369 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7370 }
7371 
7372 static struct drm_plane_state *
7373 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7374 {
7375 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7376 
7377 	old_dm_plane_state = to_dm_plane_state(plane->state);
7378 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7379 	if (!dm_plane_state)
7380 		return NULL;
7381 
7382 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7383 
7384 	if (old_dm_plane_state->dc_state) {
7385 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7386 		dc_plane_state_retain(dm_plane_state->dc_state);
7387 	}
7388 
7389 	return &dm_plane_state->base;
7390 }
7391 
7392 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7393 				struct drm_plane_state *state)
7394 {
7395 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7396 
7397 	if (dm_plane_state->dc_state)
7398 		dc_plane_state_release(dm_plane_state->dc_state);
7399 
7400 	drm_atomic_helper_plane_destroy_state(plane, state);
7401 }
7402 
7403 static const struct drm_plane_funcs dm_plane_funcs = {
7404 	.update_plane	= drm_atomic_helper_update_plane,
7405 	.disable_plane	= drm_atomic_helper_disable_plane,
7406 	.destroy	= drm_primary_helper_destroy,
7407 	.reset = dm_drm_plane_reset,
7408 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7409 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7410 	.format_mod_supported = dm_plane_format_mod_supported,
7411 };
7412 
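/*
 * Reserve and pin the framebuffer BO (VRAM or GTT for regular planes, VRAM
 * for the cursor), map it into GART and record the resulting GPU address in
 * the amdgpu_framebuffer, then fill the DC plane buffer attributes for
 * freshly created plane states.
 */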
7413 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7414 				      struct drm_plane_state *new_state)
7415 {
7416 	struct amdgpu_framebuffer *afb;
7417 	struct drm_gem_object *obj;
7418 	struct amdgpu_device *adev;
7419 	struct amdgpu_bo *rbo;
7420 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7421 	struct list_head list;
7422 	struct ttm_validate_buffer tv;
7423 	struct ww_acquire_ctx ticket;
7424 	uint32_t domain;
7425 	int r;
7426 
7427 	if (!new_state->fb) {
7428 		DRM_DEBUG_KMS("No FB bound\n");
7429 		return 0;
7430 	}
7431 
7432 	afb = to_amdgpu_framebuffer(new_state->fb);
7433 	obj = new_state->fb->obj[0];
7434 	rbo = gem_to_amdgpu_bo(obj);
7435 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7436 	INIT_LIST_HEAD(&list);
7437 
7438 	tv.bo = &rbo->tbo;
7439 	tv.num_shared = 1;
7440 	list_add(&tv.head, &list);
7441 
7442 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7443 	if (r) {
7444 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7445 		return r;
7446 	}
7447 
7448 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7449 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7450 	else
7451 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7452 
7453 	r = amdgpu_bo_pin(rbo, domain);
7454 	if (unlikely(r != 0)) {
7455 		if (r != -ERESTARTSYS)
7456 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7457 		ttm_eu_backoff_reservation(&ticket, &list);
7458 		return r;
7459 	}
7460 
7461 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7462 	if (unlikely(r != 0)) {
7463 		amdgpu_bo_unpin(rbo);
7464 		ttm_eu_backoff_reservation(&ticket, &list);
7465 		DRM_ERROR("%p bind failed\n", rbo);
7466 		return r;
7467 	}
7468 
7469 	ttm_eu_backoff_reservation(&ticket, &list);
7470 
7471 	afb->address = amdgpu_bo_gpu_offset(rbo);
7472 
7473 	amdgpu_bo_ref(rbo);
7474 
7475 	/**
7476 	 * We don't do surface updates on planes that have been newly created,
7477 	 * but we also don't have the afb->address during atomic check.
7478 	 *
7479 	 * Fill in buffer attributes depending on the address here, but only on
7480 	 * newly created planes since they're not being used by DC yet and this
7481 	 * won't modify global state.
7482 	 */
7483 	dm_plane_state_old = to_dm_plane_state(plane->state);
7484 	dm_plane_state_new = to_dm_plane_state(new_state);
7485 
7486 	if (dm_plane_state_new->dc_state &&
7487 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7488 		struct dc_plane_state *plane_state =
7489 			dm_plane_state_new->dc_state;
7490 		bool force_disable_dcc = !plane_state->dcc.enable;
7491 
7492 		fill_plane_buffer_attributes(
7493 			adev, afb, plane_state->format, plane_state->rotation,
7494 			afb->tiling_flags,
7495 			&plane_state->tiling_info, &plane_state->plane_size,
7496 			&plane_state->dcc, &plane_state->address,
7497 			afb->tmz_surface, force_disable_dcc);
7498 	}
7499 
7500 	return 0;
7501 }
7502 
7503 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7504 				       struct drm_plane_state *old_state)
7505 {
7506 	struct amdgpu_bo *rbo;
7507 	int r;
7508 
7509 	if (!old_state->fb)
7510 		return;
7511 
7512 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7513 	r = amdgpu_bo_reserve(rbo, false);
7514 	if (unlikely(r)) {
7515 		DRM_ERROR("failed to reserve rbo before unpin\n");
7516 		return;
7517 	}
7518 
7519 	amdgpu_bo_unpin(rbo);
7520 	amdgpu_bo_unreserve(rbo);
7521 	amdgpu_bo_unref(&rbo);
7522 }
7523 
7524 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7525 				       struct drm_crtc_state *new_crtc_state)
7526 {
7527 	struct drm_framebuffer *fb = state->fb;
7528 	int min_downscale, max_upscale;
7529 	int min_scale = 0;
7530 	int max_scale = INT_MAX;
7531 
7532 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7533 	if (fb && state->crtc) {
7534 		/* Validate viewport to cover the case when only the position changes */
7535 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7536 			int viewport_width = state->crtc_w;
7537 			int viewport_height = state->crtc_h;
7538 
7539 			if (state->crtc_x < 0)
7540 				viewport_width += state->crtc_x;
7541 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7542 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7543 
7544 			if (state->crtc_y < 0)
7545 				viewport_height += state->crtc_y;
7546 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7547 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7548 
7549 			if (viewport_width < 0 || viewport_height < 0) {
7550 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7551 				return -EINVAL;
7552 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7553 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7554 				return -EINVAL;
7555 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7556 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7557 				return -EINVAL;
7558 			}
7559 
7560 		}
7561 
7562 		/* Get min/max allowed scaling factors from plane caps. */
7563 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7564 					     &min_downscale, &max_upscale);
7565 		/*
7566 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7567 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7568 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7569 		 */
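		/*
		 * A worked example with hypothetical caps: max_upscale = 16000
		 * (16x) and min_downscale = 250 (0.25x) would give
		 *   min_scale = (1000 << 16) / 16000 = 4096   (0.0625 in 16.16)
		 *   max_scale = (1000 << 16) / 250   = 262144 (4.0 in 16.16)
		 */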
7570 		min_scale = (1000 << 16) / max_upscale;
7571 		max_scale = (1000 << 16) / min_downscale;
7572 	}
7573 
7574 	return drm_atomic_helper_check_plane_state(
7575 		state, new_crtc_state, min_scale, max_scale, true, true);
7576 }
7577 
7578 static int dm_plane_atomic_check(struct drm_plane *plane,
7579 				 struct drm_atomic_state *state)
7580 {
7581 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7582 										 plane);
7583 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7584 	struct dc *dc = adev->dm.dc;
7585 	struct dm_plane_state *dm_plane_state;
7586 	struct dc_scaling_info scaling_info;
7587 	struct drm_crtc_state *new_crtc_state;
7588 	int ret;
7589 
7590 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7591 
7592 	dm_plane_state = to_dm_plane_state(new_plane_state);
7593 
7594 	if (!dm_plane_state->dc_state)
7595 		return 0;
7596 
7597 	new_crtc_state =
7598 		drm_atomic_get_new_crtc_state(state,
7599 					      new_plane_state->crtc);
7600 	if (!new_crtc_state)
7601 		return -EINVAL;
7602 
7603 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7604 	if (ret)
7605 		return ret;
7606 
7607 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7608 	if (ret)
7609 		return ret;
7610 
7611 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7612 		return 0;
7613 
7614 	return -EINVAL;
7615 }
7616 
7617 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7618 				       struct drm_atomic_state *state)
7619 {
7620 	/* Only support async updates on cursor planes. */
7621 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7622 		return -EINVAL;
7623 
7624 	return 0;
7625 }
7626 
7627 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7628 					 struct drm_atomic_state *state)
7629 {
7630 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7631 									   plane);
7632 	struct drm_plane_state *old_state =
7633 		drm_atomic_get_old_plane_state(state, plane);
7634 
7635 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7636 
7637 	swap(plane->state->fb, new_state->fb);
7638 
7639 	plane->state->src_x = new_state->src_x;
7640 	plane->state->src_y = new_state->src_y;
7641 	plane->state->src_w = new_state->src_w;
7642 	plane->state->src_h = new_state->src_h;
7643 	plane->state->crtc_x = new_state->crtc_x;
7644 	plane->state->crtc_y = new_state->crtc_y;
7645 	plane->state->crtc_w = new_state->crtc_w;
7646 	plane->state->crtc_h = new_state->crtc_h;
7647 
7648 	handle_cursor_update(plane, old_state);
7649 }
7650 
7651 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7652 	.prepare_fb = dm_plane_helper_prepare_fb,
7653 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7654 	.atomic_check = dm_plane_atomic_check,
7655 	.atomic_async_check = dm_plane_atomic_async_check,
7656 	.atomic_async_update = dm_plane_atomic_async_update
7657 };
7658 
7659 /*
7660  * TODO: these are currently initialized to rgb formats only.
7661  * For future use cases we should either initialize them dynamically based on
7662  * plane capabilities, or initialize this array to all formats, so the
7663  * internal drm check will succeed, and let DC implement the proper check.
7664  */
7665 static const uint32_t rgb_formats[] = {
7666 	DRM_FORMAT_XRGB8888,
7667 	DRM_FORMAT_ARGB8888,
7668 	DRM_FORMAT_RGBA8888,
7669 	DRM_FORMAT_XRGB2101010,
7670 	DRM_FORMAT_XBGR2101010,
7671 	DRM_FORMAT_ARGB2101010,
7672 	DRM_FORMAT_ABGR2101010,
7673 	DRM_FORMAT_XRGB16161616,
7674 	DRM_FORMAT_XBGR16161616,
7675 	DRM_FORMAT_ARGB16161616,
7676 	DRM_FORMAT_ABGR16161616,
7677 	DRM_FORMAT_XBGR8888,
7678 	DRM_FORMAT_ABGR8888,
7679 	DRM_FORMAT_RGB565,
7680 };
7681 
7682 static const uint32_t overlay_formats[] = {
7683 	DRM_FORMAT_XRGB8888,
7684 	DRM_FORMAT_ARGB8888,
7685 	DRM_FORMAT_RGBA8888,
7686 	DRM_FORMAT_XBGR8888,
7687 	DRM_FORMAT_ABGR8888,
7688 	DRM_FORMAT_RGB565
7689 };
7690 
7691 static const u32 cursor_formats[] = {
7692 	DRM_FORMAT_ARGB8888
7693 };
7694 
7695 static int get_plane_formats(const struct drm_plane *plane,
7696 			     const struct dc_plane_cap *plane_cap,
7697 			     uint32_t *formats, int max_formats)
7698 {
7699 	int i, num_formats = 0;
7700 
7701 	/*
7702 	 * TODO: Query support for each group of formats directly from
7703 	 * DC plane caps. This will require adding more formats to the
7704 	 * caps list.
7705 	 */
7706 
7707 	switch (plane->type) {
7708 	case DRM_PLANE_TYPE_PRIMARY:
7709 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7710 			if (num_formats >= max_formats)
7711 				break;
7712 
7713 			formats[num_formats++] = rgb_formats[i];
7714 		}
7715 
7716 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7717 			formats[num_formats++] = DRM_FORMAT_NV12;
7718 		if (plane_cap && plane_cap->pixel_format_support.p010)
7719 			formats[num_formats++] = DRM_FORMAT_P010;
7720 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7721 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7722 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7723 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7724 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7725 		}
7726 		break;
7727 
7728 	case DRM_PLANE_TYPE_OVERLAY:
7729 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7730 			if (num_formats >= max_formats)
7731 				break;
7732 
7733 			formats[num_formats++] = overlay_formats[i];
7734 		}
7735 		break;
7736 
7737 	case DRM_PLANE_TYPE_CURSOR:
7738 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7739 			if (num_formats >= max_formats)
7740 				break;
7741 
7742 			formats[num_formats++] = cursor_formats[i];
7743 		}
7744 		break;
7745 	}
7746 
7747 	return num_formats;
7748 }
7749 
7750 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7751 				struct drm_plane *plane,
7752 				unsigned long possible_crtcs,
7753 				const struct dc_plane_cap *plane_cap)
7754 {
7755 	uint32_t formats[32];
7756 	int num_formats;
7757 	int res = -EPERM;
7758 	unsigned int supported_rotations;
7759 	uint64_t *modifiers = NULL;
7760 
7761 	num_formats = get_plane_formats(plane, plane_cap, formats,
7762 					ARRAY_SIZE(formats));
7763 
7764 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7765 	if (res)
7766 		return res;
7767 
7768 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7769 				       &dm_plane_funcs, formats, num_formats,
7770 				       modifiers, plane->type, NULL);
7771 	kfree(modifiers);
7772 	if (res)
7773 		return res;
7774 
7775 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7776 	    plane_cap && plane_cap->per_pixel_alpha) {
7777 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7778 					  BIT(DRM_MODE_BLEND_PREMULTI);
7779 
7780 		drm_plane_create_alpha_property(plane);
7781 		drm_plane_create_blend_mode_property(plane, blend_caps);
7782 	}
7783 
7784 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7785 	    plane_cap &&
7786 	    (plane_cap->pixel_format_support.nv12 ||
7787 	     plane_cap->pixel_format_support.p010)) {
7788 		/* This only affects YUV formats. */
7789 		drm_plane_create_color_properties(
7790 			plane,
7791 			BIT(DRM_COLOR_YCBCR_BT601) |
7792 			BIT(DRM_COLOR_YCBCR_BT709) |
7793 			BIT(DRM_COLOR_YCBCR_BT2020),
7794 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7795 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7796 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7797 	}
7798 
7799 	supported_rotations =
7800 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7801 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7802 
7803 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7804 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7805 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7806 						   supported_rotations);
7807 
7808 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7809 
7810 	/* Create (reset) the plane state */
7811 	if (plane->funcs->reset)
7812 		plane->funcs->reset(plane);
7813 
7814 	return 0;
7815 }
7816 
7817 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7818 			       struct drm_plane *plane,
7819 			       uint32_t crtc_index)
7820 {
7821 	struct amdgpu_crtc *acrtc = NULL;
7822 	struct drm_plane *cursor_plane;
7823 
7824 	int res = -ENOMEM;
7825 
7826 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7827 	if (!cursor_plane)
7828 		goto fail;
7829 
7830 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7831 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7832 
7833 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7834 	if (!acrtc)
7835 		goto fail;
7836 
7837 	res = drm_crtc_init_with_planes(
7838 			dm->ddev,
7839 			&acrtc->base,
7840 			plane,
7841 			cursor_plane,
7842 			&amdgpu_dm_crtc_funcs, NULL);
7843 
7844 	if (res)
7845 		goto fail;
7846 
7847 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7848 
7849 	/* Create (reset) the crtc state */
7850 	if (acrtc->base.funcs->reset)
7851 		acrtc->base.funcs->reset(&acrtc->base);
7852 
7853 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7854 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7855 
7856 	acrtc->crtc_id = crtc_index;
7857 	acrtc->base.enabled = false;
7858 	acrtc->otg_inst = -1;
7859 
7860 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7861 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7862 				   true, MAX_COLOR_LUT_ENTRIES);
7863 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7864 
7865 	return 0;
7866 
7867 fail:
7868 	kfree(acrtc);
7869 	kfree(cursor_plane);
7870 	return res;
7871 }
7872 
7873 
7874 static int to_drm_connector_type(enum signal_type st)
7875 {
7876 	switch (st) {
7877 	case SIGNAL_TYPE_HDMI_TYPE_A:
7878 		return DRM_MODE_CONNECTOR_HDMIA;
7879 	case SIGNAL_TYPE_EDP:
7880 		return DRM_MODE_CONNECTOR_eDP;
7881 	case SIGNAL_TYPE_LVDS:
7882 		return DRM_MODE_CONNECTOR_LVDS;
7883 	case SIGNAL_TYPE_RGB:
7884 		return DRM_MODE_CONNECTOR_VGA;
7885 	case SIGNAL_TYPE_DISPLAY_PORT:
7886 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7887 		return DRM_MODE_CONNECTOR_DisplayPort;
7888 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7889 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7890 		return DRM_MODE_CONNECTOR_DVID;
7891 	case SIGNAL_TYPE_VIRTUAL:
7892 		return DRM_MODE_CONNECTOR_VIRTUAL;
7893 
7894 	default:
7895 		return DRM_MODE_CONNECTOR_Unknown;
7896 	}
7897 }
7898 
7899 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7900 {
7901 	struct drm_encoder *encoder;
7902 
7903 	/* There is only one encoder per connector */
7904 	drm_connector_for_each_possible_encoder(connector, encoder)
7905 		return encoder;
7906 
7907 	return NULL;
7908 }
7909 
7910 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7911 {
7912 	struct drm_encoder *encoder;
7913 	struct amdgpu_encoder *amdgpu_encoder;
7914 
7915 	encoder = amdgpu_dm_connector_to_encoder(connector);
7916 
7917 	if (encoder == NULL)
7918 		return;
7919 
7920 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7921 
7922 	amdgpu_encoder->native_mode.clock = 0;
7923 
7924 	if (!list_empty(&connector->probed_modes)) {
7925 		struct drm_display_mode *preferred_mode = NULL;
7926 
7927 		list_for_each_entry(preferred_mode,
7928 				    &connector->probed_modes,
7929 				    head) {
7930 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7931 				amdgpu_encoder->native_mode = *preferred_mode;
7932 
7933 			break;
7934 		}
7935 
7936 	}
7937 }
7938 
7939 static struct drm_display_mode *
7940 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7941 			     char *name,
7942 			     int hdisplay, int vdisplay)
7943 {
7944 	struct drm_device *dev = encoder->dev;
7945 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7946 	struct drm_display_mode *mode = NULL;
7947 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7948 
7949 	mode = drm_mode_duplicate(dev, native_mode);
7950 
7951 	if (mode == NULL)
7952 		return NULL;
7953 
7954 	mode->hdisplay = hdisplay;
7955 	mode->vdisplay = vdisplay;
7956 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7957 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7958 
7959 	return mode;
7960 
7961 }
7962 
7963 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7964 						 struct drm_connector *connector)
7965 {
7966 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7967 	struct drm_display_mode *mode = NULL;
7968 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7969 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7970 				to_amdgpu_dm_connector(connector);
7971 	int i;
7972 	int n;
7973 	struct mode_size {
7974 		char name[DRM_DISPLAY_MODE_LEN];
7975 		int w;
7976 		int h;
7977 	} common_modes[] = {
7978 		{  "640x480",  640,  480},
7979 		{  "800x600",  800,  600},
7980 		{ "1024x768", 1024,  768},
7981 		{ "1280x720", 1280,  720},
7982 		{ "1280x800", 1280,  800},
7983 		{"1280x1024", 1280, 1024},
7984 		{ "1440x900", 1440,  900},
7985 		{"1680x1050", 1680, 1050},
7986 		{"1600x1200", 1600, 1200},
7987 		{"1920x1080", 1920, 1080},
7988 		{"1920x1200", 1920, 1200}
7989 	};
7990 
7991 	n = ARRAY_SIZE(common_modes);
7992 
7993 	for (i = 0; i < n; i++) {
7994 		struct drm_display_mode *curmode = NULL;
7995 		bool mode_existed = false;
7996 
7997 		if (common_modes[i].w > native_mode->hdisplay ||
7998 		    common_modes[i].h > native_mode->vdisplay ||
7999 		   (common_modes[i].w == native_mode->hdisplay &&
8000 		    common_modes[i].h == native_mode->vdisplay))
8001 			continue;
8002 
8003 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8004 			if (common_modes[i].w == curmode->hdisplay &&
8005 			    common_modes[i].h == curmode->vdisplay) {
8006 				mode_existed = true;
8007 				break;
8008 			}
8009 		}
8010 
8011 		if (mode_existed)
8012 			continue;
8013 
8014 		mode = amdgpu_dm_create_common_mode(encoder,
8015 				common_modes[i].name, common_modes[i].w,
8016 				common_modes[i].h);
8017 		drm_mode_probed_add(connector, mode);
8018 		amdgpu_dm_connector->num_modes++;
8019 	}
8020 }
8021 
8022 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8023 {
8024 	struct drm_encoder *encoder;
8025 	struct amdgpu_encoder *amdgpu_encoder;
8026 	const struct drm_display_mode *native_mode;
8027 
8028 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8029 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8030 		return;
8031 
8032 	encoder = amdgpu_dm_connector_to_encoder(connector);
8033 	if (!encoder)
8034 		return;
8035 
8036 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8037 
8038 	native_mode = &amdgpu_encoder->native_mode;
8039 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8040 		return;
8041 
8042 	drm_connector_set_panel_orientation_with_quirk(connector,
8043 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8044 						       native_mode->hdisplay,
8045 						       native_mode->vdisplay);
8046 }
8047 
8048 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8049 					      struct edid *edid)
8050 {
8051 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8052 			to_amdgpu_dm_connector(connector);
8053 
8054 	if (edid) {
8055 		/* empty probed_modes */
8056 		INIT_LIST_HEAD(&connector->probed_modes);
8057 		amdgpu_dm_connector->num_modes =
8058 				drm_add_edid_modes(connector, edid);
8059 
8060 		/* Sort the probed modes before calling
8061 		 * amdgpu_dm_get_native_mode() since an EDID can have
8062 		 * more than one preferred mode. Modes later in the
8063 		 * probed mode list could be of a higher, preferred
8064 		 * resolution: for example, a 3840x2160 preferred timing
8065 		 * in the base EDID and a 4096x2160 preferred resolution
8066 		 * in a later DID extension block.
8067 		 */
8068 		drm_mode_sort(&connector->probed_modes);
8069 		amdgpu_dm_get_native_mode(connector);
8070 
8071 		/* Freesync capabilities are reset by calling
8072 		 * drm_add_edid_modes() and need to be
8073 		 * restored here.
8074 		 */
8075 		amdgpu_dm_update_freesync_caps(connector, edid);
8076 
8077 		amdgpu_set_panel_orientation(connector);
8078 	} else {
8079 		amdgpu_dm_connector->num_modes = 0;
8080 	}
8081 }
8082 
8083 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8084 			      struct drm_display_mode *mode)
8085 {
8086 	struct drm_display_mode *m;
8087 
8088 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8089 		if (drm_mode_equal(m, mode))
8090 			return true;
8091 	}
8092 
8093 	return false;
8094 }
8095 
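/*
 * For a VRR-capable panel, synthesize additional modes at common video
 * refresh rates by stretching vtotal of the highest-refresh probed mode
 * while keeping the pixel clock unchanged, so fixed-rate content can be
 * played back within the panel's freesync range.
 */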
8096 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8097 {
8098 	const struct drm_display_mode *m;
8099 	struct drm_display_mode *new_mode;
8100 	uint i;
8101 	uint32_t new_modes_count = 0;
8102 
8103 	/* Standard FPS values
8104 	 *
8105 	 * 23.976       - TV/NTSC
8106 	 * 24 	        - Cinema
8107 	 * 25 	        - TV/PAL
8108 	 * 29.97        - TV/NTSC
8109 	 * 30 	        - TV/NTSC
8110 	 * 48 	        - Cinema HFR
8111 	 * 50 	        - TV/PAL
8112 	 * 60 	        - Commonly used
8113 	 * 48,72,96,120 - Multiples of 24
8114 	 */
8115 	static const uint32_t common_rates[] = {
8116 		23976, 24000, 25000, 29970, 30000,
8117 		48000, 50000, 60000, 72000, 96000, 120000
8118 	};
8119 
8120 	/*
8121 	 * Find mode with highest refresh rate with the same resolution
8122 	 * as the preferred mode. Some monitors report a preferred mode
8123 	 * with lower resolution than the highest refresh rate supported.
8124 	 */
8125 
8126 	m = get_highest_refresh_rate_mode(aconnector, true);
8127 	if (!m)
8128 		return 0;
8129 
8130 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8131 		uint64_t target_vtotal, target_vtotal_diff;
8132 		uint64_t num, den;
8133 
8134 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8135 			continue;
8136 
8137 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8138 		    common_rates[i] > aconnector->max_vfreq * 1000)
8139 			continue;
8140 
8141 		num = (unsigned long long)m->clock * 1000 * 1000;
8142 		den = common_rates[i] * (unsigned long long)m->htotal;
8143 		target_vtotal = div_u64(num, den);
8144 		target_vtotal_diff = target_vtotal - m->vtotal;
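		/*
		 * E.g. (illustrative numbers): retargeting a 1920x1080@60 mode
		 * with clock 148500 kHz, htotal 2200 and vtotal 1125 to 48 Hz
		 * gives target_vtotal = div_u64(148500 * 1000 * 1000,
		 * 48000 * 2200) = 1406, i.e. a diff of 281 extra lines.
		 */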
8145 
8146 		/* Check for illegal modes */
8147 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8148 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8149 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8150 			continue;
8151 
8152 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8153 		if (!new_mode)
8154 			goto out;
8155 
8156 		new_mode->vtotal += (u16)target_vtotal_diff;
8157 		new_mode->vsync_start += (u16)target_vtotal_diff;
8158 		new_mode->vsync_end += (u16)target_vtotal_diff;
8159 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8160 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8161 
8162 		if (!is_duplicate_mode(aconnector, new_mode)) {
8163 			drm_mode_probed_add(&aconnector->base, new_mode);
8164 			new_modes_count += 1;
8165 		} else
8166 			drm_mode_destroy(aconnector->base.dev, new_mode);
8167 	}
8168  out:
8169 	return new_modes_count;
8170 }
8171 
8172 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8173 						   struct edid *edid)
8174 {
8175 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8176 		to_amdgpu_dm_connector(connector);
8177 
8178 	if (!(amdgpu_freesync_vid_mode && edid))
8179 		return;
8180 
8181 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8182 		amdgpu_dm_connector->num_modes +=
8183 			add_fs_modes(amdgpu_dm_connector);
8184 }
8185 
8186 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8187 {
8188 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8189 			to_amdgpu_dm_connector(connector);
8190 	struct drm_encoder *encoder;
8191 	struct edid *edid = amdgpu_dm_connector->edid;
8192 
8193 	encoder = amdgpu_dm_connector_to_encoder(connector);
8194 
8195 	if (!drm_edid_is_valid(edid)) {
8196 		amdgpu_dm_connector->num_modes =
8197 				drm_add_modes_noedid(connector, 640, 480);
8198 	} else {
8199 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8200 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8201 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8202 	}
8203 	amdgpu_dm_fbc_init(connector);
8204 
8205 	return amdgpu_dm_connector->num_modes;
8206 }
8207 
8208 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8209 				     struct amdgpu_dm_connector *aconnector,
8210 				     int connector_type,
8211 				     struct dc_link *link,
8212 				     int link_index)
8213 {
8214 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8215 
8216 	/*
8217 	 * Some of the properties below require access to state, like bpc.
8218 	 * Allocate some default initial connector state with our reset helper.
8219 	 */
8220 	if (aconnector->base.funcs->reset)
8221 		aconnector->base.funcs->reset(&aconnector->base);
8222 
8223 	aconnector->connector_id = link_index;
8224 	aconnector->dc_link = link;
8225 	aconnector->base.interlace_allowed = false;
8226 	aconnector->base.doublescan_allowed = false;
8227 	aconnector->base.stereo_allowed = false;
8228 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8229 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8230 	aconnector->audio_inst = -1;
8231 	mutex_init(&aconnector->hpd_lock);
8232 
8233 	/*
8234 	 * Configure HPD hot-plug support: connector->polled defaults to 0,
8235 	 * which means HPD hot plug is not supported.
8236 	 */
8237 	switch (connector_type) {
8238 	case DRM_MODE_CONNECTOR_HDMIA:
8239 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8240 		aconnector->base.ycbcr_420_allowed =
8241 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8242 		break;
8243 	case DRM_MODE_CONNECTOR_DisplayPort:
8244 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8245 		if (link->is_dig_mapping_flexible &&
8246 		    link->dc->res_pool->funcs->link_encs_assign) {
8247 			link->link_enc =
8248 				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8249 			if (!link->link_enc)
8250 				link->link_enc =
8251 					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8252 		}
8253 
8254 		if (link->link_enc)
8255 			aconnector->base.ycbcr_420_allowed =
8256 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8257 		break;
8258 	case DRM_MODE_CONNECTOR_DVID:
8259 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8260 		break;
8261 	default:
8262 		break;
8263 	}
8264 
8265 	drm_object_attach_property(&aconnector->base.base,
8266 				dm->ddev->mode_config.scaling_mode_property,
8267 				DRM_MODE_SCALE_NONE);
8268 
8269 	drm_object_attach_property(&aconnector->base.base,
8270 				adev->mode_info.underscan_property,
8271 				UNDERSCAN_OFF);
8272 	drm_object_attach_property(&aconnector->base.base,
8273 				adev->mode_info.underscan_hborder_property,
8274 				0);
8275 	drm_object_attach_property(&aconnector->base.base,
8276 				adev->mode_info.underscan_vborder_property,
8277 				0);
8278 
8279 	if (!aconnector->mst_port)
8280 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8281 
8282 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8283 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8284 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8285 
8286 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8287 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8288 		drm_object_attach_property(&aconnector->base.base,
8289 				adev->mode_info.abm_level_property, 0);
8290 	}
8291 
8292 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8293 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8294 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8295 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8296 
8297 		if (!aconnector->mst_port)
8298 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8299 
8300 #ifdef CONFIG_DRM_AMD_DC_HDCP
8301 		if (adev->dm.hdcp_workqueue)
8302 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8303 #endif
8304 	}
8305 }
8306 
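/*
 * Translate an array of i2c_msg transfers into a single DC i2c_command and
 * submit it over the link's DDC channel via dc_submit_i2c().
 */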
8307 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8308 			      struct i2c_msg *msgs, int num)
8309 {
8310 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8311 	struct ddc_service *ddc_service = i2c->ddc_service;
8312 	struct i2c_command cmd;
8313 	int i;
8314 	int result = -EIO;
8315 
8316 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8317 
8318 	if (!cmd.payloads)
8319 		return result;
8320 
8321 	cmd.number_of_payloads = num;
8322 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8323 	cmd.speed = 100;
8324 
8325 	for (i = 0; i < num; i++) {
8326 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8327 		cmd.payloads[i].address = msgs[i].addr;
8328 		cmd.payloads[i].length = msgs[i].len;
8329 		cmd.payloads[i].data = msgs[i].buf;
8330 	}
8331 
8332 	if (dc_submit_i2c(
8333 			ddc_service->ctx->dc,
8334 			ddc_service->ddc_pin->hw_info.ddc_channel,
8335 			&cmd))
8336 		result = num;
8337 
8338 	kfree(cmd.payloads);
8339 	return result;
8340 }
8341 
8342 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8343 {
8344 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8345 }
8346 
8347 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8348 	.master_xfer = amdgpu_dm_i2c_xfer,
8349 	.functionality = amdgpu_dm_i2c_func,
8350 };
8351 
8352 static struct amdgpu_i2c_adapter *
8353 create_i2c(struct ddc_service *ddc_service,
8354 	   int link_index,
8355 	   int *res)
8356 {
8357 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8358 	struct amdgpu_i2c_adapter *i2c;
8359 
8360 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8361 	if (!i2c)
8362 		return NULL;
8363 	i2c->base.owner = THIS_MODULE;
8364 	i2c->base.class = I2C_CLASS_DDC;
8365 	i2c->base.dev.parent = &adev->pdev->dev;
8366 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8367 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8368 	i2c_set_adapdata(&i2c->base, i2c);
8369 	i2c->ddc_service = ddc_service;
8370 	if (i2c->ddc_service->ddc_pin)
8371 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8372 
8373 	return i2c;
8374 }
8375 
8376 
8377 /*
8378  * Note: this function assumes that dc_link_detect() was called for the
8379  * dc_link which will be represented by this aconnector.
8380  */
8381 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8382 				    struct amdgpu_dm_connector *aconnector,
8383 				    uint32_t link_index,
8384 				    struct amdgpu_encoder *aencoder)
8385 {
8386 	int res = 0;
8387 	int connector_type;
8388 	struct dc *dc = dm->dc;
8389 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8390 	struct amdgpu_i2c_adapter *i2c;
8391 
8392 	link->priv = aconnector;
8393 
8394 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8395 
8396 	i2c = create_i2c(link->ddc, link->link_index, &res);
8397 	if (!i2c) {
8398 		DRM_ERROR("Failed to create i2c adapter data\n");
8399 		return -ENOMEM;
8400 	}
8401 
8402 	aconnector->i2c = i2c;
8403 	res = i2c_add_adapter(&i2c->base);
8404 
8405 	if (res) {
8406 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8407 		goto out_free;
8408 	}
8409 
8410 	connector_type = to_drm_connector_type(link->connector_signal);
8411 
8412 	res = drm_connector_init_with_ddc(
8413 			dm->ddev,
8414 			&aconnector->base,
8415 			&amdgpu_dm_connector_funcs,
8416 			connector_type,
8417 			&i2c->base);
8418 
8419 	if (res) {
8420 		DRM_ERROR("connector_init failed\n");
8421 		aconnector->connector_id = -1;
8422 		goto out_free;
8423 	}
8424 
8425 	drm_connector_helper_add(
8426 			&aconnector->base,
8427 			&amdgpu_dm_connector_helper_funcs);
8428 
8429 	amdgpu_dm_connector_init_helper(
8430 		dm,
8431 		aconnector,
8432 		connector_type,
8433 		link,
8434 		link_index);
8435 
8436 	drm_connector_attach_encoder(
8437 		&aconnector->base, &aencoder->base);
8438 
8439 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8440 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8441 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8442 
8443 out_free:
8444 	if (res) {
8445 		kfree(i2c);
8446 		aconnector->i2c = NULL;
8447 	}
8448 	return res;
8449 }
8450 
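/*
 * Return the possible_crtcs bitmask for an encoder: one bit per CRTC,
 * capped at the six CRTCs the display hardware supports.
 */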
8451 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8452 {
8453 	switch (adev->mode_info.num_crtc) {
8454 	case 1:
8455 		return 0x1;
8456 	case 2:
8457 		return 0x3;
8458 	case 3:
8459 		return 0x7;
8460 	case 4:
8461 		return 0xf;
8462 	case 5:
8463 		return 0x1f;
8464 	case 6:
8465 	default:
8466 		return 0x3f;
8467 	}
8468 }
8469 
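/* Initialize the DRM encoder for a link and attach the DM encoder helpers. */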
8470 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8471 				  struct amdgpu_encoder *aencoder,
8472 				  uint32_t link_index)
8473 {
8474 	struct amdgpu_device *adev = drm_to_adev(dev);
8475 
8476 	int res = drm_encoder_init(dev,
8477 				   &aencoder->base,
8478 				   &amdgpu_dm_encoder_funcs,
8479 				   DRM_MODE_ENCODER_TMDS,
8480 				   NULL);
8481 
8482 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8483 
8484 	if (!res)
8485 		aencoder->encoder_id = link_index;
8486 	else
8487 		aencoder->encoder_id = -1;
8488 
8489 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8490 
8491 	return res;
8492 }
8493 
8494 static void manage_dm_interrupts(struct amdgpu_device *adev,
8495 				 struct amdgpu_crtc *acrtc,
8496 				 bool enable)
8497 {
8498 	/*
8499 	 * We have no guarantee that the frontend index maps to the same
8500 	 * backend index - some even map to more than one.
8501 	 *
8502 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8503 	 */
8504 	int irq_type =
8505 		amdgpu_display_crtc_idx_to_irq_type(
8506 			adev,
8507 			acrtc->crtc_id);
8508 
8509 	if (enable) {
8510 		drm_crtc_vblank_on(&acrtc->base);
8511 		amdgpu_irq_get(
8512 			adev,
8513 			&adev->pageflip_irq,
8514 			irq_type);
8515 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8516 		amdgpu_irq_get(
8517 			adev,
8518 			&adev->vline0_irq,
8519 			irq_type);
8520 #endif
8521 	} else {
8522 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8523 		amdgpu_irq_put(
8524 			adev,
8525 			&adev->vline0_irq,
8526 			irq_type);
8527 #endif
8528 		amdgpu_irq_put(
8529 			adev,
8530 			&adev->pageflip_irq,
8531 			irq_type);
8532 		drm_crtc_vblank_off(&acrtc->base);
8533 	}
8534 }
8535 
8536 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8537 				      struct amdgpu_crtc *acrtc)
8538 {
8539 	int irq_type =
8540 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8541 
8542 	/*
8543 	 * This reads the current state for the IRQ and forcibly reapplies
8544 	 * the setting to hardware.
8545 	 */
8546 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8547 }
8548 
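/*
 * Return true if the scaling mode or the underscan borders changed
 * between the old and new connector state.
 */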
8549 static bool
8550 is_scaling_state_different(const struct dm_connector_state *dm_state,
8551 			   const struct dm_connector_state *old_dm_state)
8552 {
8553 	if (dm_state->scaling != old_dm_state->scaling)
8554 		return true;
8555 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8556 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8557 			return true;
8558 	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8559 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8560 			return true;
8561 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8562 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8563 		return true;
8564 	return false;
8565 }
8566 
8567 #ifdef CONFIG_DRM_AMD_DC_HDCP
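/*
 * Decide whether the HDCP content protection state machine needs to run
 * for this connector, normalizing the requested state along the way.
 */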
8568 static bool is_content_protection_different(struct drm_connector_state *state,
8569 					    const struct drm_connector_state *old_state,
8570 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8571 {
8572 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8573 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8574 
8575 	/* Handle: Type0/1 change */
8576 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8577 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8578 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8579 		return true;
8580 	}
8581 
8582 	/* CP is being re-enabled, ignore this
8583 	 *
8584 	 * Handles:	ENABLED -> DESIRED
8585 	 */
8586 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8587 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8588 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8589 		return false;
8590 	}
8591 
8592 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8593 	 *
8594 	 * Handles:	UNDESIRED -> ENABLED
8595 	 */
8596 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8597 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8598 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8599 
8600 	/* Stream removed and re-enabled
8601 	 *
8602 	 * Can sometimes overlap with the HPD case,
8603 	 * thus set update_hdcp to false to avoid
8604 	 * setting HDCP multiple times.
8605 	 *
8606 	 * Handles:	DESIRED -> DESIRED (Special case)
8607 	 */
8608 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8609 		state->crtc && state->crtc->enabled &&
8610 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8611 		dm_con_state->update_hdcp = false;
8612 		return true;
8613 	}
8614 
8615 	/* Hot-plug, headless s3, dpms
8616 	 *
8617 	 * Only start HDCP if the display is connected/enabled.
8618 	 * update_hdcp flag will be set to false until the next
8619 	 * HPD comes in.
8620 	 *
8621 	 * Handles:	DESIRED -> DESIRED (Special case)
8622 	 */
8623 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8624 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8625 		dm_con_state->update_hdcp = false;
8626 		return true;
8627 	}
8628 
8629 	/*
8630 	 * Handles:	UNDESIRED -> UNDESIRED
8631 	 *		DESIRED -> DESIRED
8632 	 *		ENABLED -> ENABLED
8633 	 */
8634 	if (old_state->content_protection == state->content_protection)
8635 		return false;
8636 
8637 	/*
8638 	 * Handles:	UNDESIRED -> DESIRED
8639 	 *		DESIRED -> UNDESIRED
8640 	 *		ENABLED -> UNDESIRED
8641 	 */
8642 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8643 		return true;
8644 
8645 	/*
8646 	 * Handles:	DESIRED -> ENABLED
8647 	 */
8648 	return false;
8649 }
8650 
8651 #endif
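
/* Reset the CRTC bookkeeping (OTG instance, enabled flag) when its stream is removed. */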
8652 static void remove_stream(struct amdgpu_device *adev,
8653 			  struct amdgpu_crtc *acrtc,
8654 			  struct dc_stream_state *stream)
8655 {
8656 	/* this is the update mode case */
8657 
8658 	acrtc->otg_inst = -1;
8659 	acrtc->enabled = false;
8660 }
8661 
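/*
 * Translate the cursor plane state into a DC cursor position, folding
 * negative screen coordinates into the hotspot. Returns -EINVAL if the
 * cursor exceeds the hardware cursor limits.
 */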
8662 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8663 			       struct dc_cursor_position *position)
8664 {
8665 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8666 	int x, y;
8667 	int xorigin = 0, yorigin = 0;
8668 
8669 	if (!crtc || !plane->state->fb)
8670 		return 0;
8671 
8672 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8673 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8674 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8675 			  __func__,
8676 			  plane->state->crtc_w,
8677 			  plane->state->crtc_h);
8678 		return -EINVAL;
8679 	}
8680 
8681 	x = plane->state->crtc_x;
8682 	y = plane->state->crtc_y;
8683 
8684 	if (x <= -amdgpu_crtc->max_cursor_width ||
8685 	    y <= -amdgpu_crtc->max_cursor_height)
8686 		return 0;
8687 
8688 	if (x < 0) {
8689 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8690 		x = 0;
8691 	}
8692 	if (y < 0) {
8693 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8694 		y = 0;
8695 	}
8696 	position->enable = true;
8697 	position->translate_by_source = true;
8698 	position->x = x;
8699 	position->y = y;
8700 	position->x_hotspot = xorigin;
8701 	position->y_hotspot = yorigin;
8702 
8703 	return 0;
8704 }
8705 
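/*
 * Program the cursor attributes and position for the stream backing this
 * CRTC, or disable the cursor when the plane no longer has a framebuffer.
 */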
8706 static void handle_cursor_update(struct drm_plane *plane,
8707 				 struct drm_plane_state *old_plane_state)
8708 {
8709 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8710 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8711 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8712 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8713 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8714 	uint64_t address = afb ? afb->address : 0;
8715 	struct dc_cursor_position position = {0};
8716 	struct dc_cursor_attributes attributes;
8717 	int ret;
8718 
8719 	if (!plane->state->fb && !old_plane_state->fb)
8720 		return;
8721 
8722 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8723 		      __func__,
8724 		      amdgpu_crtc->crtc_id,
8725 		      plane->state->crtc_w,
8726 		      plane->state->crtc_h);
8727 
8728 	ret = get_cursor_position(plane, crtc, &position);
8729 	if (ret)
8730 		return;
8731 
8732 	if (!position.enable) {
8733 		/* turn off cursor */
8734 		if (crtc_state && crtc_state->stream) {
8735 			mutex_lock(&adev->dm.dc_lock);
8736 			dc_stream_set_cursor_position(crtc_state->stream,
8737 						      &position);
8738 			mutex_unlock(&adev->dm.dc_lock);
8739 		}
8740 		return;
8741 	}
8742 
8743 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8744 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8745 
8746 	memset(&attributes, 0, sizeof(attributes));
8747 	attributes.address.high_part = upper_32_bits(address);
8748 	attributes.address.low_part  = lower_32_bits(address);
8749 	attributes.width             = plane->state->crtc_w;
8750 	attributes.height            = plane->state->crtc_h;
8751 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8752 	attributes.rotation_angle    = 0;
8753 	attributes.attribute_flags.value = 0;
8754 
8755 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8756 
8757 	if (crtc_state->stream) {
8758 		mutex_lock(&adev->dm.dc_lock);
8759 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8760 							 &attributes))
8761 			DRM_ERROR("DC failed to set cursor attributes\n");
8762 
8763 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8764 						   &position))
8765 			DRM_ERROR("DC failed to set cursor position\n");
8766 		mutex_unlock(&adev->dm.dc_lock);
8767 	}
8768 }
8769 
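/*
 * Hand the pending pageflip event over to the pageflip interrupt handler.
 * Must be called with the DRM event_lock held.
 */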
8770 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8771 {
8772 
8773 	assert_spin_locked(&acrtc->base.dev->event_lock);
8774 	WARN_ON(acrtc->event);
8775 
8776 	acrtc->event = acrtc->base.state->event;
8777 
8778 	/* Set the flip status */
8779 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8780 
8781 	/* Mark this event as consumed */
8782 	acrtc->base.state->event = NULL;
8783 
8784 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8785 		     acrtc->crtc_id);
8786 }
8787 
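/*
 * Recompute the VRR parameters and VRR infopacket for a stream on flip and
 * note whether the timing adjustment or infopacket needs reprogramming.
 */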
8788 static void update_freesync_state_on_stream(
8789 	struct amdgpu_display_manager *dm,
8790 	struct dm_crtc_state *new_crtc_state,
8791 	struct dc_stream_state *new_stream,
8792 	struct dc_plane_state *surface,
8793 	u32 flip_timestamp_in_us)
8794 {
8795 	struct mod_vrr_params vrr_params;
8796 	struct dc_info_packet vrr_infopacket = {0};
8797 	struct amdgpu_device *adev = dm->adev;
8798 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8799 	unsigned long flags;
8800 	bool pack_sdp_v1_3 = false;
8801 
8802 	if (!new_stream)
8803 		return;
8804 
8805 	/*
8806 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8807 	 * For now it's sufficient to just guard against these conditions.
8808 	 */
8809 
8810 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8811 		return;
8812 
8813 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8814 	vrr_params = acrtc->dm_irq_params.vrr_params;
8815 
8816 	if (surface) {
8817 		mod_freesync_handle_preflip(
8818 			dm->freesync_module,
8819 			surface,
8820 			new_stream,
8821 			flip_timestamp_in_us,
8822 			&vrr_params);
8823 
8824 		if (adev->family < AMDGPU_FAMILY_AI &&
8825 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8826 			mod_freesync_handle_v_update(dm->freesync_module,
8827 						     new_stream, &vrr_params);
8828 
8829 			/* Need to call this before the frame ends. */
8830 			dc_stream_adjust_vmin_vmax(dm->dc,
8831 						   new_crtc_state->stream,
8832 						   &vrr_params.adjust);
8833 		}
8834 	}
8835 
8836 	mod_freesync_build_vrr_infopacket(
8837 		dm->freesync_module,
8838 		new_stream,
8839 		&vrr_params,
8840 		PACKET_TYPE_VRR,
8841 		TRANSFER_FUNC_UNKNOWN,
8842 		&vrr_infopacket,
8843 		pack_sdp_v1_3);
8844 
8845 	new_crtc_state->freesync_timing_changed |=
8846 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8847 			&vrr_params.adjust,
8848 			sizeof(vrr_params.adjust)) != 0);
8849 
8850 	new_crtc_state->freesync_vrr_info_changed |=
8851 		(memcmp(&new_crtc_state->vrr_infopacket,
8852 			&vrr_infopacket,
8853 			sizeof(vrr_infopacket)) != 0);
8854 
8855 	acrtc->dm_irq_params.vrr_params = vrr_params;
8856 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8857 
8858 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8859 	new_stream->vrr_infopacket = vrr_infopacket;
8860 
8861 	if (new_crtc_state->freesync_vrr_info_changed)
8862 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8863 			      new_crtc_state->base.crtc->base.id,
8864 			      (int)new_crtc_state->base.vrr_enabled,
8865 			      (int)vrr_params.state);
8866 
8867 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8868 }
8869 
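/*
 * Translate the CRTC's freesync configuration into VRR parameters and copy
 * them into dm_irq_params so the interrupt handlers see consistent state.
 */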
8870 static void update_stream_irq_parameters(
8871 	struct amdgpu_display_manager *dm,
8872 	struct dm_crtc_state *new_crtc_state)
8873 {
8874 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8875 	struct mod_vrr_params vrr_params;
8876 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8877 	struct amdgpu_device *adev = dm->adev;
8878 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8879 	unsigned long flags;
8880 
8881 	if (!new_stream)
8882 		return;
8883 
8884 	/*
8885 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8886 	 * For now it's sufficient to just guard against these conditions.
8887 	 */
8888 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8889 		return;
8890 
8891 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8892 	vrr_params = acrtc->dm_irq_params.vrr_params;
8893 
8894 	if (new_crtc_state->vrr_supported &&
8895 	    config.min_refresh_in_uhz &&
8896 	    config.max_refresh_in_uhz) {
8897 		/*
8898 		 * if freesync compatible mode was set, config.state will be set
8899 		 * in atomic check
8900 		 */
8901 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8902 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8903 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8904 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8905 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8906 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8907 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8908 		} else {
8909 			config.state = new_crtc_state->base.vrr_enabled ?
8910 						     VRR_STATE_ACTIVE_VARIABLE :
8911 						     VRR_STATE_INACTIVE;
8912 		}
8913 	} else {
8914 		config.state = VRR_STATE_UNSUPPORTED;
8915 	}
8916 
8917 	mod_freesync_build_vrr_params(dm->freesync_module,
8918 				      new_stream,
8919 				      &config, &vrr_params);
8920 
8921 	new_crtc_state->freesync_timing_changed |=
8922 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8923 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8924 
8925 	new_crtc_state->freesync_config = config;
8926 	/* Copy state for access from DM IRQ handler */
8927 	acrtc->dm_irq_params.freesync_config = config;
8928 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8929 	acrtc->dm_irq_params.vrr_params = vrr_params;
8930 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8931 }
8932 
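/*
 * Take or drop the vblank reference and the vupdate interrupt when VRR
 * transitions between inactive and active.
 */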
8933 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8934 					    struct dm_crtc_state *new_state)
8935 {
8936 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8937 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8938 
8939 	if (!old_vrr_active && new_vrr_active) {
8940 		/* Transition VRR inactive -> active:
8941 		 * While VRR is active, we must not disable the vblank irq, as a
8942 		 * re-enable after a disable would compute bogus vblank/pflip
8943 		 * timestamps if it happened inside the display front porch.
8944 		 *
8945 		 * We also need the vupdate irq for the actual core vblank handling
8946 		 * at the end of vblank.
8947 		 */
8948 		dm_set_vupdate_irq(new_state->base.crtc, true);
8949 		drm_crtc_vblank_get(new_state->base.crtc);
8950 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8951 				 __func__, new_state->base.crtc->base.id);
8952 	} else if (old_vrr_active && !new_vrr_active) {
8953 		/* Transition VRR active -> inactive:
8954 		 * Allow vblank irq disable again for fixed refresh rate.
8955 		 */
8956 		dm_set_vupdate_irq(new_state->base.crtc, false);
8957 		drm_crtc_vblank_put(new_state->base.crtc);
8958 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8959 				 __func__, new_state->base.crtc->base.id);
8960 	}
8961 }
8962 
8963 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8964 {
8965 	struct drm_plane *plane;
8966 	struct drm_plane_state *old_plane_state;
8967 	int i;
8968 
8969 	/*
8970 	 * TODO: Make this per-stream so we don't issue redundant updates for
8971 	 * commits with multiple streams.
8972 	 */
8973 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8974 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8975 			handle_cursor_update(plane, old_plane_state);
8976 }
8977 
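/*
 * Program all plane updates for one CRTC: build a dc_surface_update bundle,
 * throttle page flips against the target vblank, arm the pageflip event and
 * commit the surface and stream updates to DC.
 */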
8978 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8979 				    struct dc_state *dc_state,
8980 				    struct drm_device *dev,
8981 				    struct amdgpu_display_manager *dm,
8982 				    struct drm_crtc *pcrtc,
8983 				    bool wait_for_vblank)
8984 {
8985 	uint32_t i;
8986 	uint64_t timestamp_ns;
8987 	struct drm_plane *plane;
8988 	struct drm_plane_state *old_plane_state, *new_plane_state;
8989 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8990 	struct drm_crtc_state *new_pcrtc_state =
8991 			drm_atomic_get_new_crtc_state(state, pcrtc);
8992 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8993 	struct dm_crtc_state *dm_old_crtc_state =
8994 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8995 	int planes_count = 0, vpos, hpos;
8996 	long r;
8997 	unsigned long flags;
8998 	struct amdgpu_bo *abo;
8999 	uint32_t target_vblank, last_flip_vblank;
9000 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9001 	bool pflip_present = false;
9002 	struct {
9003 		struct dc_surface_update surface_updates[MAX_SURFACES];
9004 		struct dc_plane_info plane_infos[MAX_SURFACES];
9005 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9006 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9007 		struct dc_stream_update stream_update;
9008 	} *bundle;
9009 
9010 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9011 
9012 	if (!bundle) {
9013 		dm_error("Failed to allocate update bundle\n");
9014 		goto cleanup;
9015 	}
9016 
9017 	/*
9018 	 * Disable the cursor first if we're disabling all the planes.
9019 	 * It'll remain on the screen after the planes are re-enabled
9020 	 * if we don't.
9021 	 */
9022 	if (acrtc_state->active_planes == 0)
9023 		amdgpu_dm_commit_cursors(state);
9024 
9025 	/* update planes when needed */
9026 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9027 		struct drm_crtc *crtc = new_plane_state->crtc;
9028 		struct drm_crtc_state *new_crtc_state;
9029 		struct drm_framebuffer *fb = new_plane_state->fb;
9030 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9031 		bool plane_needs_flip;
9032 		struct dc_plane_state *dc_plane;
9033 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9034 
9035 		/* Cursor plane is handled after stream updates */
9036 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9037 			continue;
9038 
9039 		if (!fb || !crtc || pcrtc != crtc)
9040 			continue;
9041 
9042 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9043 		if (!new_crtc_state->active)
9044 			continue;
9045 
9046 		dc_plane = dm_new_plane_state->dc_state;
9047 
9048 		bundle->surface_updates[planes_count].surface = dc_plane;
9049 		if (new_pcrtc_state->color_mgmt_changed) {
9050 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9051 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9052 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9053 		}
9054 
9055 		fill_dc_scaling_info(dm->adev, new_plane_state,
9056 				     &bundle->scaling_infos[planes_count]);
9057 
9058 		bundle->surface_updates[planes_count].scaling_info =
9059 			&bundle->scaling_infos[planes_count];
9060 
9061 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9062 
9063 		pflip_present = pflip_present || plane_needs_flip;
9064 
9065 		if (!plane_needs_flip) {
9066 			planes_count += 1;
9067 			continue;
9068 		}
9069 
9070 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9071 
9072 		/*
9073 		 * Wait for all fences on this FB. Do limited wait to avoid
9074 		 * deadlock during GPU reset when this fence will not signal
9075 		 * but we hold reservation lock for the BO.
9076 		 */
9077 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9078 					  msecs_to_jiffies(5000));
9079 		if (unlikely(r <= 0))
9080 			DRM_ERROR("Waiting for fences timed out!");
9081 
9082 		fill_dc_plane_info_and_addr(
9083 			dm->adev, new_plane_state,
9084 			afb->tiling_flags,
9085 			&bundle->plane_infos[planes_count],
9086 			&bundle->flip_addrs[planes_count].address,
9087 			afb->tmz_surface, false);
9088 
9089 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9090 				 new_plane_state->plane->index,
9091 				 bundle->plane_infos[planes_count].dcc.enable);
9092 
9093 		bundle->surface_updates[planes_count].plane_info =
9094 			&bundle->plane_infos[planes_count];
9095 
9096 		/*
9097 		 * Only allow immediate flips for fast updates that don't
9098 		 * change FB pitch, DCC state, rotation or mirroring.
9099 		 */
9100 		bundle->flip_addrs[planes_count].flip_immediate =
9101 			crtc->state->async_flip &&
9102 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9103 
9104 		timestamp_ns = ktime_get_ns();
9105 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9106 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9107 		bundle->surface_updates[planes_count].surface = dc_plane;
9108 
9109 		if (!bundle->surface_updates[planes_count].surface) {
9110 			DRM_ERROR("No surface for CRTC: id=%d\n",
9111 					acrtc_attach->crtc_id);
9112 			continue;
9113 		}
9114 
9115 		if (plane == pcrtc->primary)
9116 			update_freesync_state_on_stream(
9117 				dm,
9118 				acrtc_state,
9119 				acrtc_state->stream,
9120 				dc_plane,
9121 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9122 
9123 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9124 				 __func__,
9125 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9126 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9127 
9128 		planes_count += 1;
9129 
9130 	}
9131 
9132 	if (pflip_present) {
9133 		if (!vrr_active) {
9134 			/* Use old throttling in non-vrr fixed refresh rate mode
9135 			 * to keep flip scheduling based on target vblank counts
9136 			 * working in a backwards compatible way, e.g., for
9137 			 * clients using the GLX_OML_sync_control extension or
9138 			 * DRI3/Present extension with defined target_msc.
9139 			 */
9140 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9141 		} else {
9143 			/* For variable refresh rate mode only:
9144 			 * Get vblank of last completed flip to avoid > 1 vrr
9145 			 * flips per video frame by use of throttling, but allow
9146 			 * flip programming anywhere in the possibly large
9147 			 * variable vrr vblank interval for fine-grained flip
9148 			 * timing control and more opportunity to avoid stutter
9149 			 * on late submission of flips.
9150 			 */
9151 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9152 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9153 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9154 		}
9155 
9156 		target_vblank = last_flip_vblank + wait_for_vblank;
9157 
9158 		/*
9159 		 * Wait until we're out of the vertical blank period before the one
9160 		 * targeted by the flip
9161 		 */
9162 		while ((acrtc_attach->enabled &&
9163 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9164 							    0, &vpos, &hpos, NULL,
9165 							    NULL, &pcrtc->hwmode)
9166 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9167 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9168 			(int)(target_vblank -
9169 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9170 			usleep_range(1000, 1100);
9171 		}
9172 
9173 		/*
9174 		 * Prepare the flip event for the pageflip interrupt to handle.
9175 		 *
9176 		 * This only works in the case where we've already turned on the
9177 		 * appropriate hardware blocks (e.g. HUBP), so in the transition
9178 		 * from 0 -> n planes we have to skip the hardware-generated event
9179 		 * and rely on sending it from software.
9180 		 */
9181 		if (acrtc_attach->base.state->event &&
9182 		    acrtc_state->active_planes > 0 &&
9183 		    !acrtc_state->force_dpms_off) {
9184 			drm_crtc_vblank_get(pcrtc);
9185 
9186 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9187 
9188 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9189 			prepare_flip_isr(acrtc_attach);
9190 
9191 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9192 		}
9193 
9194 		if (acrtc_state->stream) {
9195 			if (acrtc_state->freesync_vrr_info_changed)
9196 				bundle->stream_update.vrr_infopacket =
9197 					&acrtc_state->stream->vrr_infopacket;
9198 		}
9199 	}
9200 
9201 	/* Update the planes if changed or disable if we don't have any. */
9202 	if ((planes_count || acrtc_state->active_planes == 0) &&
9203 		acrtc_state->stream) {
9204 #if defined(CONFIG_DRM_AMD_DC_DCN)
9205 		/*
9206 		 * If PSR or idle optimizations are enabled then flush out
9207 		 * any pending work before hardware programming.
9208 		 */
9209 		if (dm->vblank_control_workqueue)
9210 			flush_workqueue(dm->vblank_control_workqueue);
9211 #endif
9212 
9213 		bundle->stream_update.stream = acrtc_state->stream;
9214 		if (new_pcrtc_state->mode_changed) {
9215 			bundle->stream_update.src = acrtc_state->stream->src;
9216 			bundle->stream_update.dst = acrtc_state->stream->dst;
9217 		}
9218 
9219 		if (new_pcrtc_state->color_mgmt_changed) {
9220 			/*
9221 			 * TODO: This isn't fully correct since we've actually
9222 			 * already modified the stream in place.
9223 			 */
9224 			bundle->stream_update.gamut_remap =
9225 				&acrtc_state->stream->gamut_remap_matrix;
9226 			bundle->stream_update.output_csc_transform =
9227 				&acrtc_state->stream->csc_color_matrix;
9228 			bundle->stream_update.out_transfer_func =
9229 				acrtc_state->stream->out_transfer_func;
9230 		}
9231 
9232 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9233 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9234 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9235 
9236 		/*
9237 		 * If FreeSync state on the stream has changed then we need to
9238 		 * re-adjust the min/max bounds now that DC doesn't handle this
9239 		 * as part of commit.
9240 		 */
9241 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9242 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9243 			dc_stream_adjust_vmin_vmax(
9244 				dm->dc, acrtc_state->stream,
9245 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9246 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9247 		}
9248 		mutex_lock(&dm->dc_lock);
9249 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9250 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9251 			amdgpu_dm_psr_disable(acrtc_state->stream);
9252 
9253 		dc_commit_updates_for_stream(dm->dc,
9254 						     bundle->surface_updates,
9255 						     planes_count,
9256 						     acrtc_state->stream,
9257 						     &bundle->stream_update,
9258 						     dc_state);
9259 
9260 		/*
9261 		 * Enable or disable the interrupts on the backend.
9262 		 *
9263 		 * Most pipes are put into power gating when unused.
9264 		 *
9265 		 * When a pipe is power gated we lose its interrupt
9266 		 * enablement state by the time it is ungated again.
9267 		 *
9268 		 * So we need to update the IRQ control state in hardware
9269 		 * whenever the pipe turns on (since it could be previously
9270 		 * power gated) or off (since some pipes can't be power gated
9271 		 * on some ASICs).
9272 		 */
9273 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9274 			dm_update_pflip_irq_state(drm_to_adev(dev),
9275 						  acrtc_attach);
9276 
9277 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9278 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9279 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9280 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9281 
9282 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9283 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9284 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9285 			struct amdgpu_dm_connector *aconn =
9286 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9287 
9288 			if (aconn->psr_skip_count > 0)
9289 				aconn->psr_skip_count--;
9290 
9291 			/* Allow PSR when skip count is 0. */
9292 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9293 		} else {
9294 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9295 		}
9296 
9297 		mutex_unlock(&dm->dc_lock);
9298 	}
9299 
9300 	/*
9301 	 * Update cursor state *after* programming all the planes.
9302 	 * This avoids redundant programming in the case where we're going
9303 	 * to be disabling a single plane - those pipes are being disabled.
9304 	 */
9305 	if (acrtc_state->active_planes)
9306 		amdgpu_dm_commit_cursors(state);
9307 
9308 cleanup:
9309 	kfree(bundle);
9310 }
9311 
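/*
 * Notify the audio component about ELD changes: removals for connectors that
 * lost their CRTC and additions for connectors with a newly enabled stream.
 */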
9312 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9313 				   struct drm_atomic_state *state)
9314 {
9315 	struct amdgpu_device *adev = drm_to_adev(dev);
9316 	struct amdgpu_dm_connector *aconnector;
9317 	struct drm_connector *connector;
9318 	struct drm_connector_state *old_con_state, *new_con_state;
9319 	struct drm_crtc_state *new_crtc_state;
9320 	struct dm_crtc_state *new_dm_crtc_state;
9321 	const struct dc_stream_status *status;
9322 	int i, inst;
9323 
9324 	/* Notify device removals. */
9325 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9326 		if (old_con_state->crtc != new_con_state->crtc) {
9327 			/* CRTC changes require notification. */
9328 			goto notify;
9329 		}
9330 
9331 		if (!new_con_state->crtc)
9332 			continue;
9333 
9334 		new_crtc_state = drm_atomic_get_new_crtc_state(
9335 			state, new_con_state->crtc);
9336 
9337 		if (!new_crtc_state)
9338 			continue;
9339 
9340 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9341 			continue;
9342 
9343 	notify:
9344 		aconnector = to_amdgpu_dm_connector(connector);
9345 
9346 		mutex_lock(&adev->dm.audio_lock);
9347 		inst = aconnector->audio_inst;
9348 		aconnector->audio_inst = -1;
9349 		mutex_unlock(&adev->dm.audio_lock);
9350 
9351 		amdgpu_dm_audio_eld_notify(adev, inst);
9352 	}
9353 
9354 	/* Notify audio device additions. */
9355 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9356 		if (!new_con_state->crtc)
9357 			continue;
9358 
9359 		new_crtc_state = drm_atomic_get_new_crtc_state(
9360 			state, new_con_state->crtc);
9361 
9362 		if (!new_crtc_state)
9363 			continue;
9364 
9365 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9366 			continue;
9367 
9368 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9369 		if (!new_dm_crtc_state->stream)
9370 			continue;
9371 
9372 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9373 		if (!status)
9374 			continue;
9375 
9376 		aconnector = to_amdgpu_dm_connector(connector);
9377 
9378 		mutex_lock(&adev->dm.audio_lock);
9379 		inst = status->audio_inst;
9380 		aconnector->audio_inst = inst;
9381 		mutex_unlock(&adev->dm.audio_lock);
9382 
9383 		amdgpu_dm_audio_eld_notify(adev, inst);
9384 	}
9385 }
9386 
9387 /*
9388  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9389  * @crtc_state: the DRM CRTC state
9390  * @stream_state: the DC stream state.
9391  *
9392  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9393  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9394  */
9395 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9396 						struct dc_stream_state *stream_state)
9397 {
9398 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9399 }
9400 
9401 /**
9402  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9403  * @state: The atomic state to commit
9404  *
9405  * This will tell DC to commit the constructed DC state from atomic_check,
9406  * programming the hardware. Any failures here implies a hardware failure, since
9407  * programming the hardware. Any failure here implies a hardware failure, since
9408  */
9409 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9410 {
9411 	struct drm_device *dev = state->dev;
9412 	struct amdgpu_device *adev = drm_to_adev(dev);
9413 	struct amdgpu_display_manager *dm = &adev->dm;
9414 	struct dm_atomic_state *dm_state;
9415 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9416 	uint32_t i, j;
9417 	struct drm_crtc *crtc;
9418 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9419 	unsigned long flags;
9420 	bool wait_for_vblank = true;
9421 	struct drm_connector *connector;
9422 	struct drm_connector_state *old_con_state, *new_con_state;
9423 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9424 	int crtc_disable_count = 0;
9425 	bool mode_set_reset_required = false;
9426 
9427 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9428 
9429 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9430 
9431 	dm_state = dm_atomic_get_new_state(state);
9432 	if (dm_state && dm_state->context) {
9433 		dc_state = dm_state->context;
9434 	} else {
9435 		/* No state changes, retain current state. */
9436 		dc_state_temp = dc_create_state(dm->dc);
9437 		ASSERT(dc_state_temp);
9438 		dc_state = dc_state_temp;
9439 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9440 	}
9441 
9442 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9443 				       new_crtc_state, i) {
9444 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9445 
9446 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9447 
9448 		if (old_crtc_state->active &&
9449 		    (!new_crtc_state->active ||
9450 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9451 			manage_dm_interrupts(adev, acrtc, false);
9452 			dc_stream_release(dm_old_crtc_state->stream);
9453 		}
9454 	}
9455 
9456 	drm_atomic_helper_calc_timestamping_constants(state);
9457 
9458 	/* update changed items */
9459 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9460 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9461 
9462 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9463 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9464 
9465 		DRM_DEBUG_ATOMIC(
9466 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9467 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9468 			"connectors_changed:%d\n",
9469 			acrtc->crtc_id,
9470 			new_crtc_state->enable,
9471 			new_crtc_state->active,
9472 			new_crtc_state->planes_changed,
9473 			new_crtc_state->mode_changed,
9474 			new_crtc_state->active_changed,
9475 			new_crtc_state->connectors_changed);
9476 
9477 		/* Disable cursor if disabling crtc */
9478 		if (old_crtc_state->active && !new_crtc_state->active) {
9479 			struct dc_cursor_position position;
9480 
9481 			memset(&position, 0, sizeof(position));
9482 			mutex_lock(&dm->dc_lock);
9483 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9484 			mutex_unlock(&dm->dc_lock);
9485 		}
9486 
9487 		/* Copy all transient state flags into dc state */
9488 		if (dm_new_crtc_state->stream) {
9489 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9490 							    dm_new_crtc_state->stream);
9491 		}
9492 
9493 		/* handles headless hotplug case, updating new_state and
9494 		 * aconnector as needed
9495 		 */
9496 
9497 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9498 
9499 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9500 
9501 			if (!dm_new_crtc_state->stream) {
9502 				/*
9503 				 * This could happen because of issues with
9504 				 * userspace notification delivery.
9505 				 * In this case userspace tries to set a mode on
9506 				 * a display which is in fact disconnected.
9507 				 * dc_sink is NULL on the aconnector in this case.
9508 				 * We expect a mode reset to come soon.
9509 				 *
9510 				 * This can also happen when an unplug occurs
9511 				 * during the resume sequence.
9512 				 *
9513 				 * In this case, we want to pretend we still
9514 				 * have a sink to keep the pipe running so that
9515 				 * hw state is consistent with the sw state.
9516 				 */
9517 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9518 						__func__, acrtc->base.base.id);
9519 				continue;
9520 			}
9521 
9522 			if (dm_old_crtc_state->stream)
9523 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9524 
9525 			pm_runtime_get_noresume(dev->dev);
9526 
9527 			acrtc->enabled = true;
9528 			acrtc->hw_mode = new_crtc_state->mode;
9529 			crtc->hwmode = new_crtc_state->mode;
9530 			mode_set_reset_required = true;
9531 		} else if (modereset_required(new_crtc_state)) {
9532 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9533 			/* i.e. reset mode */
9534 			if (dm_old_crtc_state->stream)
9535 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9536 
9537 			mode_set_reset_required = true;
9538 		}
9539 	} /* for_each_crtc_in_state() */
9540 
9541 	if (dc_state) {
9542 		/* if there is a mode set or reset, disable eDP PSR */
9543 		if (mode_set_reset_required) {
9544 #if defined(CONFIG_DRM_AMD_DC_DCN)
9545 			if (dm->vblank_control_workqueue)
9546 				flush_workqueue(dm->vblank_control_workqueue);
9547 #endif
9548 			amdgpu_dm_psr_disable_all(dm);
9549 		}
9550 
9551 		dm_enable_per_frame_crtc_master_sync(dc_state);
9552 		mutex_lock(&dm->dc_lock);
9553 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9554 #if defined(CONFIG_DRM_AMD_DC_DCN)
9555 		/* Allow idle optimization when vblank count is 0 for display off */
9556 		if (dm->active_vblank_irq_count == 0)
9557 			dc_allow_idle_optimizations(dm->dc, true);
9558 #endif
9559 		mutex_unlock(&dm->dc_lock);
9560 	}
9561 
9562 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9563 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9564 
9565 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9566 
9567 		if (dm_new_crtc_state->stream != NULL) {
9568 			const struct dc_stream_status *status =
9569 					dc_stream_get_status(dm_new_crtc_state->stream);
9570 
9571 			if (!status)
9572 				status = dc_stream_get_status_from_state(dc_state,
9573 									 dm_new_crtc_state->stream);
9574 			if (!status)
9575 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9576 			else
9577 				acrtc->otg_inst = status->primary_otg_inst;
9578 		}
9579 	}
9580 #ifdef CONFIG_DRM_AMD_DC_HDCP
9581 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9582 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9583 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9584 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9585 
9586 		new_crtc_state = NULL;
9587 
9588 		if (acrtc)
9589 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9590 
9591 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9592 
9593 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9594 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9595 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9596 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9597 			dm_new_con_state->update_hdcp = true;
9598 			continue;
9599 		}
9600 
9601 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9602 			hdcp_update_display(
9603 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9604 				new_con_state->hdcp_content_type,
9605 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9606 	}
9607 #endif
9608 
9609 	/* Handle connector state changes */
9610 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9611 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9612 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9613 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9614 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9615 		struct dc_stream_update stream_update;
9616 		struct dc_info_packet hdr_packet;
9617 		struct dc_stream_status *status = NULL;
9618 		bool abm_changed, hdr_changed, scaling_changed;
9619 
9620 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9621 		memset(&stream_update, 0, sizeof(stream_update));
9622 
9623 		if (acrtc) {
9624 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9625 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9626 		}
9627 
9628 		/* Skip any modesets/resets */
9629 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9630 			continue;
9631 
9632 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9633 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9634 
9635 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9636 							     dm_old_con_state);
9637 
9638 		abm_changed = dm_new_crtc_state->abm_level !=
9639 			      dm_old_crtc_state->abm_level;
9640 
9641 		hdr_changed =
9642 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9643 
9644 		if (!scaling_changed && !abm_changed && !hdr_changed)
9645 			continue;
9646 
9647 		stream_update.stream = dm_new_crtc_state->stream;
9648 		if (scaling_changed) {
9649 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9650 					dm_new_con_state, dm_new_crtc_state->stream);
9651 
9652 			stream_update.src = dm_new_crtc_state->stream->src;
9653 			stream_update.dst = dm_new_crtc_state->stream->dst;
9654 		}
9655 
9656 		if (abm_changed) {
9657 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9658 
9659 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9660 		}
9661 
9662 		if (hdr_changed) {
9663 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9664 			stream_update.hdr_static_metadata = &hdr_packet;
9665 		}
9666 
9667 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9668 
9669 		if (WARN_ON(!status))
9670 			continue;
9671 
9672 		WARN_ON(!status->plane_count);
9673 
9674 		/*
9675 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9676 		 * Here we create an empty update on each plane.
9677 		 * To fix this, DC should permit updating only stream properties.
9678 		 */
9679 		for (j = 0; j < status->plane_count; j++)
9680 			dummy_updates[j].surface = status->plane_states[0];
9681 
9682 
9683 		mutex_lock(&dm->dc_lock);
9684 		dc_commit_updates_for_stream(dm->dc,
9685 						     dummy_updates,
9686 						     status->plane_count,
9687 						     dm_new_crtc_state->stream,
9688 						     &stream_update,
9689 						     dc_state);
9690 		mutex_unlock(&dm->dc_lock);
9691 	}
9692 
9693 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9694 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9695 				      new_crtc_state, i) {
9696 		if (old_crtc_state->active && !new_crtc_state->active)
9697 			crtc_disable_count++;
9698 
9699 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9700 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9701 
9702 		/* Update the freesync config on the crtc state and the irq params */
9703 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9704 
9705 		/* Handle vrr on->off / off->on transitions */
9706 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9707 						dm_new_crtc_state);
9708 	}
9709 
9710 	/*
9711 	 * Enable interrupts for CRTCs that are newly enabled or went through
9712 	 * a modeset. This is intentionally deferred until after the front end
9713 	 * state was modified so that the OTG is already on and the IRQ
9714 	 * handlers never access stale or invalid state.
9715 	 */
9716 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9717 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9718 #ifdef CONFIG_DEBUG_FS
9719 		bool configure_crc = false;
9720 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9721 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9722 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9723 #endif
9724 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9725 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9726 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9727 #endif
9728 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9729 
9730 		if (new_crtc_state->active &&
9731 		    (!old_crtc_state->active ||
9732 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9733 			dc_stream_retain(dm_new_crtc_state->stream);
9734 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9735 			manage_dm_interrupts(adev, acrtc, true);
9736 
9737 #ifdef CONFIG_DEBUG_FS
9738 			/*
9739 			 * Frontend may have changed so reapply the CRC capture
9740 			 * settings for the stream.
9741 			 */
9742 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9743 
9744 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9745 				configure_crc = true;
9746 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9747 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9748 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9749 					acrtc->dm_irq_params.crc_window.update_win = true;
9750 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9751 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9752 					crc_rd_wrk->crtc = crtc;
9753 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9754 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9755 				}
9756 #endif
9757 			}
9758 
9759 			if (configure_crc)
9760 				if (amdgpu_dm_crtc_configure_crc_source(
9761 					crtc, dm_new_crtc_state, cur_crc_src))
9762 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9763 #endif
9764 		}
9765 	}
9766 
9767 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9768 		if (new_crtc_state->async_flip)
9769 			wait_for_vblank = false;
9770 
9771 	/* update planes when needed per crtc */
9772 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9773 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9774 
9775 		if (dm_new_crtc_state->stream)
9776 			amdgpu_dm_commit_planes(state, dc_state, dev,
9777 						dm, crtc, wait_for_vblank);
9778 	}
9779 
9780 	/* Update audio instances for each connector. */
9781 	amdgpu_dm_commit_audio(dev, state);
9782 
9783 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9784 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9785 	/* restore the backlight level */
9786 	for (i = 0; i < dm->num_of_edps; i++) {
9787 		if (dm->backlight_dev[i] &&
9788 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9789 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9790 	}
9791 #endif
9792 	/*
9793 	 * Send a vblank event for every event not handled in the flip path and
9794 	 * mark the event consumed for drm_atomic_helper_commit_hw_done()
9795 	 */
9796 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9797 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9798 
9799 		if (new_crtc_state->event)
9800 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9801 
9802 		new_crtc_state->event = NULL;
9803 	}
9804 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9805 
9806 	/* Signal HW programming completion */
9807 	drm_atomic_helper_commit_hw_done(state);
9808 
9809 	if (wait_for_vblank)
9810 		drm_atomic_helper_wait_for_flip_done(dev, state);
9811 
9812 	drm_atomic_helper_cleanup_planes(dev, state);
9813 
9814 	/* return the stolen vga memory back to VRAM */
9815 	if (!adev->mman.keep_stolen_vga_memory)
9816 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9817 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9818 
9819 	/*
9820 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9821 	 * so we can put the GPU into runtime suspend if we're not driving any
9822 	 * displays anymore
9823 	 */
9824 	for (i = 0; i < crtc_disable_count; i++)
9825 		pm_runtime_put_autosuspend(dev->dev);
9826 	pm_runtime_mark_last_busy(dev->dev);
9827 
9828 	if (dc_state_temp)
9829 		dc_release_state(dc_state_temp);
9830 }
9831 
9832 
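/*
 * Build and commit a minimal atomic state (connector, CRTC and primary plane)
 * that forces a modeset restoring the previously applied configuration.
 */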
9833 static int dm_force_atomic_commit(struct drm_connector *connector)
9834 {
9835 	int ret = 0;
9836 	struct drm_device *ddev = connector->dev;
9837 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9838 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9839 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9840 	struct drm_connector_state *conn_state;
9841 	struct drm_crtc_state *crtc_state;
9842 	struct drm_plane_state *plane_state;
9843 
9844 	if (!state)
9845 		return -ENOMEM;
9846 
9847 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9848 
9849 	/* Construct an atomic state to restore the previous display setting */
9850 
9851 	/*
9852 	 * Attach connectors to drm_atomic_state
9853 	 */
9854 	conn_state = drm_atomic_get_connector_state(state, connector);
9855 
9856 	ret = PTR_ERR_OR_ZERO(conn_state);
9857 	if (ret)
9858 		goto out;
9859 
9860 	/* Attach crtc to drm_atomic_state */
9861 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9862 
9863 	ret = PTR_ERR_OR_ZERO(crtc_state);
9864 	if (ret)
9865 		goto out;
9866 
9867 	/* force a restore */
9868 	crtc_state->mode_changed = true;
9869 
9870 	/* Attach plane to drm_atomic_state */
9871 	plane_state = drm_atomic_get_plane_state(state, plane);
9872 
9873 	ret = PTR_ERR_OR_ZERO(plane_state);
9874 	if (ret)
9875 		goto out;
9876 
9877 	/* Call commit internally with the state we just constructed */
9878 	ret = drm_atomic_commit(state);
9879 
9880 out:
9881 	drm_atomic_state_put(state);
9882 	if (ret)
9883 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9884 
9885 	return ret;
9886 }
9887 
9888 /*
9889  * This function handles all cases when a set mode does not come upon hotplug.
9890  * This includes when a display is unplugged then plugged back into the
9891  * same port and when running without usermode desktop manager support.
9892  */
9893 void dm_restore_drm_connector_state(struct drm_device *dev,
9894 				    struct drm_connector *connector)
9895 {
9896 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9897 	struct amdgpu_crtc *disconnected_acrtc;
9898 	struct dm_crtc_state *acrtc_state;
9899 
9900 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9901 		return;
9902 
9903 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9904 	if (!disconnected_acrtc)
9905 		return;
9906 
9907 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9908 	if (!acrtc_state->stream)
9909 		return;
9910 
9911 	/*
9912 	 * If the previous sink is not released and is different from the current
9913 	 * one, we deduce we are in a state where we cannot rely on a usermode
9914 	 * call to turn on the display, so we do it here.
9915 	 */
9916 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9917 		dm_force_atomic_commit(&aconnector->base);
9918 }
9919 
9920 /*
9921  * Grabs all modesetting locks to serialize against any blocking commits and
9922  * waits for completion of all non-blocking commits.
9923  */
9924 static int do_aquire_global_lock(struct drm_device *dev,
9925 				 struct drm_atomic_state *state)
9926 {
9927 	struct drm_crtc *crtc;
9928 	struct drm_crtc_commit *commit;
9929 	long ret;
9930 
9931 	/*
9932 	 * Adding all modeset locks to acquire_ctx will
9933 	 * ensure that when the framework releases it, the
9934 	 * extra locks we are locking here will get released too.
9935 	 */
9936 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9937 	if (ret)
9938 		return ret;
9939 
9940 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9941 		spin_lock(&crtc->commit_lock);
9942 		commit = list_first_entry_or_null(&crtc->commit_list,
9943 				struct drm_crtc_commit, commit_entry);
9944 		if (commit)
9945 			drm_crtc_commit_get(commit);
9946 		spin_unlock(&crtc->commit_lock);
9947 
9948 		if (!commit)
9949 			continue;
9950 
9951 		/*
9952 		 * Make sure all pending HW programming has completed and
9953 		 * all page flips are done.
9954 		 */
9955 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9956 
9957 		if (ret > 0)
9958 			ret = wait_for_completion_interruptible_timeout(
9959 					&commit->flip_done, 10*HZ);
9960 
9961 		if (ret == 0)
9962 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9963 				  "timed out\n", crtc->base.id, crtc->name);
9964 
9965 		drm_crtc_commit_put(commit);
9966 	}
9967 
9968 	return ret < 0 ? ret : 0;
9969 }
9970 
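/*
 * Derive the freesync configuration for a CRTC from the connector's
 * supported refresh range and the requested VRR state.
 */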
9971 static void get_freesync_config_for_crtc(
9972 	struct dm_crtc_state *new_crtc_state,
9973 	struct dm_connector_state *new_con_state)
9974 {
9975 	struct mod_freesync_config config = {0};
9976 	struct amdgpu_dm_connector *aconnector =
9977 			to_amdgpu_dm_connector(new_con_state->base.connector);
9978 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9979 	int vrefresh = drm_mode_vrefresh(mode);
9980 	bool fs_vid_mode = false;
9981 
9982 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9983 					vrefresh >= aconnector->min_vfreq &&
9984 					vrefresh <= aconnector->max_vfreq;
9985 
9986 	if (new_crtc_state->vrr_supported) {
9987 		new_crtc_state->stream->ignore_msa_timing_param = true;
9988 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9989 
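		/*
		 * min_vfreq/max_vfreq are whole Hz; the freesync module works
		 * in uHz. For example, a hypothetical 48-144 Hz panel ends up
		 * as 48000000-144000000 uHz here.
		 */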
9990 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9991 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9992 		config.vsif_supported = true;
9993 		config.btr = true;
9994 
9995 		if (fs_vid_mode) {
9996 			config.state = VRR_STATE_ACTIVE_FIXED;
9997 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9998 			goto out;
9999 		} else if (new_crtc_state->base.vrr_enabled) {
10000 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10001 		} else {
10002 			config.state = VRR_STATE_INACTIVE;
10003 		}
10004 	}
10005 out:
10006 	new_crtc_state->freesync_config = config;
10007 }
10008 
10009 static void reset_freesync_config_for_crtc(
10010 	struct dm_crtc_state *new_crtc_state)
10011 {
10012 	new_crtc_state->vrr_supported = false;
10013 
10014 	memset(&new_crtc_state->vrr_infopacket, 0,
10015 	       sizeof(new_crtc_state->vrr_infopacket));
10016 }
10017 
10018 static bool
10019 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10020 				 struct drm_crtc_state *new_crtc_state)
10021 {
10022 	struct drm_display_mode old_mode, new_mode;
10023 
10024 	if (!old_crtc_state || !new_crtc_state)
10025 		return false;
10026 
10027 	old_mode = old_crtc_state->mode;
10028 	new_mode = new_crtc_state->mode;
10029 
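	/*
	 * Only the vertical front porch is allowed to differ: vtotal,
	 * vsync_start and vsync_end may change, but the vsync pulse width
	 * (vsync_end - vsync_start) and all horizontal timing parameters
	 * must stay the same.
	 */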
10030 	if (old_mode.clock       == new_mode.clock &&
10031 	    old_mode.hdisplay    == new_mode.hdisplay &&
10032 	    old_mode.vdisplay    == new_mode.vdisplay &&
10033 	    old_mode.htotal      == new_mode.htotal &&
10034 	    old_mode.vtotal      != new_mode.vtotal &&
10035 	    old_mode.hsync_start == new_mode.hsync_start &&
10036 	    old_mode.vsync_start != new_mode.vsync_start &&
10037 	    old_mode.hsync_end   == new_mode.hsync_end &&
10038 	    old_mode.vsync_end   != new_mode.vsync_end &&
10039 	    old_mode.hskew       == new_mode.hskew &&
10040 	    old_mode.vscan       == new_mode.vscan &&
10041 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10042 	    (new_mode.vsync_end - new_mode.vsync_start))
10043 		return true;
10044 
10045 	return false;
10046 }
10047 
10048 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10049 	uint64_t num, den, res;
10050 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10051 
10052 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10053 
10054 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10055 	den = (unsigned long long)new_crtc_state->mode.htotal *
10056 	      (unsigned long long)new_crtc_state->mode.vtotal;
10057 
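	/*
	 * mode.clock is in kHz, so the fixed refresh rate in uHz is
	 * clock * 1000 * 1000000 / (htotal * vtotal). For example, a
	 * hypothetical 1920x1080 mode with clock = 148500, htotal = 2200 and
	 * vtotal = 1125 gives 148500000000000 / 2475000 = 60000000 uHz (60 Hz).
	 */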
10058 	res = div_u64(num, den);
10059 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10060 }
10061 
10062 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10063 				struct drm_atomic_state *state,
10064 				struct drm_crtc *crtc,
10065 				struct drm_crtc_state *old_crtc_state,
10066 				struct drm_crtc_state *new_crtc_state,
10067 				bool enable,
10068 				bool *lock_and_validation_needed)
10069 {
10070 	struct dm_atomic_state *dm_state = NULL;
10071 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10072 	struct dc_stream_state *new_stream;
10073 	int ret = 0;
10074 
10075 	/*
10076 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10077 	 * update changed items
10078 	 */
10079 	struct amdgpu_crtc *acrtc = NULL;
10080 	struct amdgpu_dm_connector *aconnector = NULL;
10081 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10082 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10083 
10084 	new_stream = NULL;
10085 
10086 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10087 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10088 	acrtc = to_amdgpu_crtc(crtc);
10089 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10090 
10091 	/* TODO This hack should go away */
10092 	if (aconnector && enable) {
10093 		/* Make sure fake sink is created in plug-in scenario */
10094 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10095 							    &aconnector->base);
10096 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10097 							    &aconnector->base);
10098 
10099 		if (IS_ERR(drm_new_conn_state)) {
10100 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10101 			goto fail;
10102 		}
10103 
10104 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10105 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10106 
10107 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10108 			goto skip_modeset;
10109 
10110 		new_stream = create_validate_stream_for_sink(aconnector,
10111 							     &new_crtc_state->mode,
10112 							     dm_new_conn_state,
10113 							     dm_old_crtc_state->stream);
10114 
10115 		/*
10116 		 * We can have no stream on ACTION_SET if a display
10117 		 * was disconnected during S3. In this case it is not an
10118 		 * error; the OS will be updated after detection and
10119 		 * will do the right thing on the next atomic commit.
10120 		 */
10121 
10122 		if (!new_stream) {
10123 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10124 					__func__, acrtc->base.base.id);
10125 			ret = -ENOMEM;
10126 			goto fail;
10127 		}
10128 
10129 		/*
10130 		 * TODO: Check VSDB bits to decide whether this should
10131 		 * be enabled or not.
10132 		 */
10133 		new_stream->triggered_crtc_reset.enabled =
10134 			dm->force_timing_sync;
10135 
10136 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10137 
10138 		ret = fill_hdr_info_packet(drm_new_conn_state,
10139 					   &new_stream->hdr_static_metadata);
10140 		if (ret)
10141 			goto fail;
10142 
10143 		/*
10144 		 * If we already removed the old stream from the context
10145 		 * (and set the new stream to NULL) then we can't reuse
10146 		 * the old stream even if the stream and scaling are unchanged.
10147 		 * We'll hit the BUG_ON and black screen.
10148 		 *
10149 		 * TODO: Refactor this function to allow this check to work
10150 		 * in all conditions.
10151 		 */
10152 		if (amdgpu_freesync_vid_mode &&
10153 		    dm_new_crtc_state->stream &&
10154 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10155 			goto skip_modeset;
10156 
10157 		if (dm_new_crtc_state->stream &&
10158 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10159 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10160 			new_crtc_state->mode_changed = false;
10161 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10162 					 new_crtc_state->mode_changed);
10163 		}
10164 	}
10165 
10166 	/* mode_changed flag may get updated above, need to check again */
10167 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10168 		goto skip_modeset;
10169 
10170 	DRM_DEBUG_ATOMIC(
10171 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10172 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10173 		"connectors_changed:%d\n",
10174 		acrtc->crtc_id,
10175 		new_crtc_state->enable,
10176 		new_crtc_state->active,
10177 		new_crtc_state->planes_changed,
10178 		new_crtc_state->mode_changed,
10179 		new_crtc_state->active_changed,
10180 		new_crtc_state->connectors_changed);
10181 
10182 	/* Remove stream for any changed/disabled CRTC */
10183 	if (!enable) {
10184 
10185 		if (!dm_old_crtc_state->stream)
10186 			goto skip_modeset;
10187 
10188 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10189 		    is_timing_unchanged_for_freesync(new_crtc_state,
10190 						     old_crtc_state)) {
10191 			new_crtc_state->mode_changed = false;
10192 			DRM_DEBUG_DRIVER(
10193 				"Mode change not required for front porch change, "
10194 				"setting mode_changed to %d",
10195 				new_crtc_state->mode_changed);
10196 
10197 			set_freesync_fixed_config(dm_new_crtc_state);
10198 
10199 			goto skip_modeset;
10200 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10201 			   is_freesync_video_mode(&new_crtc_state->mode,
10202 						  aconnector)) {
10203 			struct drm_display_mode *high_mode;
10204 
10205 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10206 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10207 				set_freesync_fixed_config(dm_new_crtc_state);
10208 			}
10209 		}
10210 
10211 		ret = dm_atomic_get_state(state, &dm_state);
10212 		if (ret)
10213 			goto fail;
10214 
10215 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10216 				crtc->base.id);
10217 
10218 		/* i.e. reset mode */
10219 		if (dc_remove_stream_from_ctx(
10220 				dm->dc,
10221 				dm_state->context,
10222 				dm_old_crtc_state->stream) != DC_OK) {
10223 			ret = -EINVAL;
10224 			goto fail;
10225 		}
10226 
10227 		dc_stream_release(dm_old_crtc_state->stream);
10228 		dm_new_crtc_state->stream = NULL;
10229 
10230 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10231 
10232 		*lock_and_validation_needed = true;
10233 
10234 	} else {/* Add stream for any updated/enabled CRTC */
10235 		/*
10236 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
10237 		 * added MST connectors are not found in the existing crtc_state in chained mode.
10238 		 * TODO: need to dig out the root cause of that.
10239 		 */
10240 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10241 			goto skip_modeset;
10242 
10243 		if (modereset_required(new_crtc_state))
10244 			goto skip_modeset;
10245 
10246 		if (modeset_required(new_crtc_state, new_stream,
10247 				     dm_old_crtc_state->stream)) {
10248 
10249 			WARN_ON(dm_new_crtc_state->stream);
10250 
10251 			ret = dm_atomic_get_state(state, &dm_state);
10252 			if (ret)
10253 				goto fail;
10254 
10255 			dm_new_crtc_state->stream = new_stream;
10256 
10257 			dc_stream_retain(new_stream);
10258 
10259 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10260 					 crtc->base.id);
10261 
10262 			if (dc_add_stream_to_ctx(
10263 					dm->dc,
10264 					dm_state->context,
10265 					dm_new_crtc_state->stream) != DC_OK) {
10266 				ret = -EINVAL;
10267 				goto fail;
10268 			}
10269 
10270 			*lock_and_validation_needed = true;
10271 		}
10272 	}
10273 
10274 skip_modeset:
10275 	/* Release extra reference */
10276 	if (new_stream)
10277 		dc_stream_release(new_stream);
10278 
10279 	/*
10280 	 * We want to do dc stream updates that do not require a
10281 	 * full modeset below.
10282 	 */
10283 	if (!(enable && aconnector && new_crtc_state->active))
10284 		return 0;
10285 	/*
10286 	 * Given above conditions, the dc state cannot be NULL because:
10287 	 * 1. We're in the process of enabling CRTCs (just been added
10288 	 *    to the dc context, or already is on the context)
10289 	 * 2. Has a valid connector attached, and
10290 	 * 3. Is currently active and enabled.
10291 	 * => The dc stream state currently exists.
10292 	 */
10293 	BUG_ON(dm_new_crtc_state->stream == NULL);
10294 
10295 	/* Scaling or underscan settings */
10296 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10297 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10298 		update_stream_scaling_settings(
10299 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10300 
10301 	/* ABM settings */
10302 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10303 
10304 	/*
10305 	 * Color management settings. We also update color properties
10306 	 * when a modeset is needed, to ensure it gets reprogrammed.
10307 	 */
10308 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10309 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10310 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10311 		if (ret)
10312 			goto fail;
10313 	}
10314 
10315 	/* Update Freesync settings. */
10316 	get_freesync_config_for_crtc(dm_new_crtc_state,
10317 				     dm_new_conn_state);
10318 
10319 	return ret;
10320 
10321 fail:
10322 	if (new_stream)
10323 		dc_stream_release(new_stream);
10324 	return ret;
10325 }
10326 
10327 static bool should_reset_plane(struct drm_atomic_state *state,
10328 			       struct drm_plane *plane,
10329 			       struct drm_plane_state *old_plane_state,
10330 			       struct drm_plane_state *new_plane_state)
10331 {
10332 	struct drm_plane *other;
10333 	struct drm_plane_state *old_other_state, *new_other_state;
10334 	struct drm_crtc_state *new_crtc_state;
10335 	int i;
10336 
10337 	/*
10338 	 * TODO: Remove this hack once the checks below are sufficient
10339 	 * to determine when we need to reset all the planes on
10340 	 * the stream.
10341 	 */
10342 	if (state->allow_modeset)
10343 		return true;
10344 
10345 	/* Exit early if we know that we're adding or removing the plane. */
10346 	if (old_plane_state->crtc != new_plane_state->crtc)
10347 		return true;
10348 
10349 	/* old crtc == new_crtc == NULL, plane not in context. */
10350 	if (!new_plane_state->crtc)
10351 		return false;
10352 
10353 	new_crtc_state =
10354 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10355 
10356 	if (!new_crtc_state)
10357 		return true;
10358 
10359 	/* CRTC Degamma changes currently require us to recreate planes. */
10360 	if (new_crtc_state->color_mgmt_changed)
10361 		return true;
10362 
10363 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10364 		return true;
10365 
10366 	/*
10367 	 * If there are any new primary or overlay planes being added or
10368 	 * removed then the z-order can potentially change. To ensure
10369 	 * correct z-order and pipe acquisition the current DC architecture
10370 	 * requires us to remove and recreate all existing planes.
10371 	 *
10372 	 * TODO: Come up with a more elegant solution for this.
10373 	 */
10374 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10375 		struct amdgpu_framebuffer *old_afb, *new_afb;
10376 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10377 			continue;
10378 
10379 		if (old_other_state->crtc != new_plane_state->crtc &&
10380 		    new_other_state->crtc != new_plane_state->crtc)
10381 			continue;
10382 
10383 		if (old_other_state->crtc != new_other_state->crtc)
10384 			return true;
10385 
10386 		/* Src/dst size and scaling updates. */
10387 		if (old_other_state->src_w != new_other_state->src_w ||
10388 		    old_other_state->src_h != new_other_state->src_h ||
10389 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10390 		    old_other_state->crtc_h != new_other_state->crtc_h)
10391 			return true;
10392 
10393 		/* Rotation / mirroring updates. */
10394 		if (old_other_state->rotation != new_other_state->rotation)
10395 			return true;
10396 
10397 		/* Blending updates. */
10398 		if (old_other_state->pixel_blend_mode !=
10399 		    new_other_state->pixel_blend_mode)
10400 			return true;
10401 
10402 		/* Alpha updates. */
10403 		if (old_other_state->alpha != new_other_state->alpha)
10404 			return true;
10405 
10406 		/* Colorspace changes. */
10407 		if (old_other_state->color_range != new_other_state->color_range ||
10408 		    old_other_state->color_encoding != new_other_state->color_encoding)
10409 			return true;
10410 
10411 		/* Framebuffer checks fall at the end. */
10412 		if (!old_other_state->fb || !new_other_state->fb)
10413 			continue;
10414 
10415 		/* Pixel format changes can require bandwidth updates. */
10416 		if (old_other_state->fb->format != new_other_state->fb->format)
10417 			return true;
10418 
10419 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10420 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10421 
10422 		/* Tiling and DCC changes also require bandwidth updates. */
10423 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10424 		    old_afb->base.modifier != new_afb->base.modifier)
10425 			return true;
10426 	}
10427 
10428 	return false;
10429 }
10430 
10431 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10432 			      struct drm_plane_state *new_plane_state,
10433 			      struct drm_framebuffer *fb)
10434 {
10435 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10436 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10437 	unsigned int pitch;
10438 	bool linear;
10439 
10440 	if (fb->width > new_acrtc->max_cursor_width ||
10441 	    fb->height > new_acrtc->max_cursor_height) {
10442 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10443 				 new_plane_state->fb->width,
10444 				 new_plane_state->fb->height);
10445 		return -EINVAL;
10446 	}
10447 	if (new_plane_state->src_w != fb->width << 16 ||
10448 	    new_plane_state->src_h != fb->height << 16) {
10449 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10450 		return -EINVAL;
10451 	}
10452 
10453 	/* Pitch in pixels */
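	/*
	 * For example, a hypothetical 64x64 ARGB8888 cursor FB has
	 * pitches[0] = 256 bytes and cpp[0] = 4, giving a pitch of 64 pixels.
	 */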
10454 	pitch = fb->pitches[0] / fb->format->cpp[0];
10455 
10456 	if (fb->width != pitch) {
10457 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10458 				 fb->width, pitch);
10459 		return -EINVAL;
10460 	}
10461 
10462 	switch (pitch) {
10463 	case 64:
10464 	case 128:
10465 	case 256:
10466 		/* FB pitch is supported by cursor plane */
10467 		break;
10468 	default:
10469 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10470 		return -EINVAL;
10471 	}
10472 
10473 	/* Core DRM takes care of checking FB modifiers, so we only need to
10474 	 * check tiling flags when the FB doesn't have a modifier. */
10475 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10476 		if (adev->family < AMDGPU_FAMILY_AI) {
10477 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10478 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10479 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10480 		} else {
10481 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10482 		}
10483 		if (!linear) {
10484 			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10485 			return -EINVAL;
10486 		}
10487 	}
10488 
10489 	return 0;
10490 }
10491 
10492 static int dm_update_plane_state(struct dc *dc,
10493 				 struct drm_atomic_state *state,
10494 				 struct drm_plane *plane,
10495 				 struct drm_plane_state *old_plane_state,
10496 				 struct drm_plane_state *new_plane_state,
10497 				 bool enable,
10498 				 bool *lock_and_validation_needed)
10499 {
10500 
10501 	struct dm_atomic_state *dm_state = NULL;
10502 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10503 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10504 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10505 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10506 	struct amdgpu_crtc *new_acrtc;
10507 	bool needs_reset;
10508 	int ret = 0;
10509 
10510 
10511 	new_plane_crtc = new_plane_state->crtc;
10512 	old_plane_crtc = old_plane_state->crtc;
10513 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10514 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10515 
10516 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10517 		if (!enable || !new_plane_crtc ||
10518 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10519 			return 0;
10520 
10521 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10522 
10523 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10524 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10525 			return -EINVAL;
10526 		}
10527 
10528 		if (new_plane_state->fb) {
10529 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10530 						 new_plane_state->fb);
10531 			if (ret)
10532 				return ret;
10533 		}
10534 
10535 		return 0;
10536 	}
10537 
10538 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10539 					 new_plane_state);
10540 
10541 	/* Remove any changed/removed planes */
10542 	if (!enable) {
10543 		if (!needs_reset)
10544 			return 0;
10545 
10546 		if (!old_plane_crtc)
10547 			return 0;
10548 
10549 		old_crtc_state = drm_atomic_get_old_crtc_state(
10550 				state, old_plane_crtc);
10551 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10552 
10553 		if (!dm_old_crtc_state->stream)
10554 			return 0;
10555 
10556 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10557 				plane->base.id, old_plane_crtc->base.id);
10558 
10559 		ret = dm_atomic_get_state(state, &dm_state);
10560 		if (ret)
10561 			return ret;
10562 
10563 		if (!dc_remove_plane_from_context(
10564 				dc,
10565 				dm_old_crtc_state->stream,
10566 				dm_old_plane_state->dc_state,
10567 				dm_state->context)) {
10568 
10569 			return -EINVAL;
10570 		}
10571 
10572 
10573 		dc_plane_state_release(dm_old_plane_state->dc_state);
10574 		dm_new_plane_state->dc_state = NULL;
10575 
10576 		*lock_and_validation_needed = true;
10577 
10578 	} else { /* Add new planes */
10579 		struct dc_plane_state *dc_new_plane_state;
10580 
10581 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10582 			return 0;
10583 
10584 		if (!new_plane_crtc)
10585 			return 0;
10586 
10587 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10588 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10589 
10590 		if (!dm_new_crtc_state->stream)
10591 			return 0;
10592 
10593 		if (!needs_reset)
10594 			return 0;
10595 
10596 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10597 		if (ret)
10598 			return ret;
10599 
10600 		WARN_ON(dm_new_plane_state->dc_state);
10601 
10602 		dc_new_plane_state = dc_create_plane_state(dc);
10603 		if (!dc_new_plane_state)
10604 			return -ENOMEM;
10605 
10606 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10607 				 plane->base.id, new_plane_crtc->base.id);
10608 
10609 		ret = fill_dc_plane_attributes(
10610 			drm_to_adev(new_plane_crtc->dev),
10611 			dc_new_plane_state,
10612 			new_plane_state,
10613 			new_crtc_state);
10614 		if (ret) {
10615 			dc_plane_state_release(dc_new_plane_state);
10616 			return ret;
10617 		}
10618 
10619 		ret = dm_atomic_get_state(state, &dm_state);
10620 		if (ret) {
10621 			dc_plane_state_release(dc_new_plane_state);
10622 			return ret;
10623 		}
10624 
10625 		/*
10626 		 * Any atomic check errors that occur after this will
10627 		 * not need a release. The plane state will be attached
10628 		 * to the stream, and therefore part of the atomic
10629 		 * state. It'll be released when the atomic state is
10630 		 * cleaned.
10631 		 */
10632 		if (!dc_add_plane_to_context(
10633 				dc,
10634 				dm_new_crtc_state->stream,
10635 				dc_new_plane_state,
10636 				dm_state->context)) {
10637 
10638 			dc_plane_state_release(dc_new_plane_state);
10639 			return -EINVAL;
10640 		}
10641 
10642 		dm_new_plane_state->dc_state = dc_new_plane_state;
10643 
10644 		/* Tell DC to do a full surface update every time there
10645 		 * is a plane change. Inefficient, but works for now.
10646 		 */
10647 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10648 
10649 		*lock_and_validation_needed = true;
10650 	}
10651 
10652 
10653 	return ret;
10654 }
10655 
10656 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10657 				struct drm_crtc *crtc,
10658 				struct drm_crtc_state *new_crtc_state)
10659 {
10660 	struct drm_plane *cursor = crtc->cursor, *underlying;
10661 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10662 	int i;
10663 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10664 
10665 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10666 	 * cursor per pipe but it's going to inherit the scaling and
10667 	 * positioning from the underlying pipe. Check the cursor plane's
10668 	 * blending properties match the underlying planes'. */
10669 
10670 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10671 	if (!new_cursor_state || !new_cursor_state->fb) {
10672 		return 0;
10673 	}
10674 
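	/*
	 * Scale factors below are in 1/1000 units: for example, a hypothetical
	 * cursor with a 64 pixel wide source (src_w = 64 << 16) shown at
	 * crtc_w = 64 gives a scale of 1000, i.e. 1.0x.
	 */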
10675 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10676 			 (new_cursor_state->src_w >> 16);
10677 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10678 			 (new_cursor_state->src_h >> 16);
10679 
10680 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10681 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10682 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10683 			continue;
10684 
10685 		/* Ignore disabled planes */
10686 		if (!new_underlying_state->fb)
10687 			continue;
10688 
10689 		underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10690 				     (new_underlying_state->src_w >> 16);
10691 		underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10692 				     (new_underlying_state->src_h >> 16);
10693 
10694 		if (cursor_scale_w != underlying_scale_w ||
10695 		    cursor_scale_h != underlying_scale_h) {
10696 			drm_dbg_atomic(crtc->dev,
10697 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10698 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10699 			return -EINVAL;
10700 		}
10701 
10702 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10703 		if (new_underlying_state->crtc_x <= 0 &&
10704 		    new_underlying_state->crtc_y <= 0 &&
10705 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10706 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10707 			break;
10708 	}
10709 
10710 	return 0;
10711 }
10712 
10713 #if defined(CONFIG_DRM_AMD_DC_DCN)
10714 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10715 {
10716 	struct drm_connector *connector;
10717 	struct drm_connector_state *conn_state;
10718 	struct amdgpu_dm_connector *aconnector = NULL;
10719 	int i;
10720 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10721 		if (conn_state->crtc != crtc)
10722 			continue;
10723 
10724 		aconnector = to_amdgpu_dm_connector(connector);
10725 		if (!aconnector->port || !aconnector->mst_port)
10726 			aconnector = NULL;
10727 		else
10728 			break;
10729 	}
10730 
10731 	if (!aconnector)
10732 		return 0;
10733 
10734 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10735 }
10736 #endif
10737 
10738 /**
10739  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10740  * @dev: The DRM device
10741  * @state: The atomic state to commit
10742  *
10743  * Validate that the given atomic state is programmable by DC into hardware.
10744  * This involves constructing a &struct dc_state reflecting the new hardware
10745  * state we wish to commit, then querying DC to see if it is programmable. It's
10746  * important not to modify the existing DC state. Otherwise, atomic_check
10747  * may unexpectedly commit hardware changes.
10748  *
10749  * When validating the DC state, it's important that the right locks are
10750  * acquired. For the full-update case, which removes/adds/updates streams on
10751  * one CRTC while flipping on another CRTC, acquiring the global lock
10752  * guarantees that any such full-update commit will wait for completion of any
10753  * outstanding flip using DRM's synchronization events.
10754  *
10755  * Note that DM adds the affected connectors for all CRTCs in state, even when that
10756  * might not seem necessary. This is because DC stream creation requires the
10757  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10758  * be possible but non-trivial - a possible TODO item.
10759  *
10760  * Return: 0 on success, negative error code if validation failed.
10761  */
10762 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10763 				  struct drm_atomic_state *state)
10764 {
10765 	struct amdgpu_device *adev = drm_to_adev(dev);
10766 	struct dm_atomic_state *dm_state = NULL;
10767 	struct dc *dc = adev->dm.dc;
10768 	struct drm_connector *connector;
10769 	struct drm_connector_state *old_con_state, *new_con_state;
10770 	struct drm_crtc *crtc;
10771 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10772 	struct drm_plane *plane;
10773 	struct drm_plane_state *old_plane_state, *new_plane_state;
10774 	enum dc_status status;
10775 	int ret, i;
10776 	bool lock_and_validation_needed = false;
10777 	struct dm_crtc_state *dm_old_crtc_state;
10778 #if defined(CONFIG_DRM_AMD_DC_DCN)
10779 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10780 	struct drm_dp_mst_topology_state *mst_state;
10781 	struct drm_dp_mst_topology_mgr *mgr;
10782 #endif
10783 
10784 	trace_amdgpu_dm_atomic_check_begin(state);
10785 
10786 	ret = drm_atomic_helper_check_modeset(dev, state);
10787 	if (ret)
10788 		goto fail;
10789 
10790 	/* Check connector changes */
10791 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10792 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10793 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10794 
10795 		/* Skip connectors that are disabled or part of modeset already. */
10796 		if (!old_con_state->crtc && !new_con_state->crtc)
10797 			continue;
10798 
10799 		if (!new_con_state->crtc)
10800 			continue;
10801 
10802 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10803 		if (IS_ERR(new_crtc_state)) {
10804 			ret = PTR_ERR(new_crtc_state);
10805 			goto fail;
10806 		}
10807 
10808 		if (dm_old_con_state->abm_level !=
10809 		    dm_new_con_state->abm_level)
10810 			new_crtc_state->connectors_changed = true;
10811 	}
10812 
10813 #if defined(CONFIG_DRM_AMD_DC_DCN)
10814 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10815 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10816 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10817 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10818 				if (ret)
10819 					goto fail;
10820 			}
10821 		}
10822 	}
10823 #endif
10824 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10825 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10826 
10827 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10828 		    !new_crtc_state->color_mgmt_changed &&
10829 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10830 			dm_old_crtc_state->dsc_force_changed == false)
10831 			continue;
10832 
10833 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10834 		if (ret)
10835 			goto fail;
10836 
10837 		if (!new_crtc_state->enable)
10838 			continue;
10839 
10840 		ret = drm_atomic_add_affected_connectors(state, crtc);
10841 		if (ret)
10842 			goto fail;
10843 
10844 		ret = drm_atomic_add_affected_planes(state, crtc);
10845 		if (ret)
10846 			goto fail;
10847 
10848 		if (dm_old_crtc_state->dsc_force_changed)
10849 			new_crtc_state->mode_changed = true;
10850 	}
10851 
10852 	/*
10853 	 * Add all primary and overlay planes on the CRTC to the state
10854 	 * whenever a plane is enabled to maintain correct z-ordering
10855 	 * and to enable fast surface updates.
10856 	 */
10857 	drm_for_each_crtc(crtc, dev) {
10858 		bool modified = false;
10859 
10860 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10861 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10862 				continue;
10863 
10864 			if (new_plane_state->crtc == crtc ||
10865 			    old_plane_state->crtc == crtc) {
10866 				modified = true;
10867 				break;
10868 			}
10869 		}
10870 
10871 		if (!modified)
10872 			continue;
10873 
10874 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10875 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10876 				continue;
10877 
10878 			new_plane_state =
10879 				drm_atomic_get_plane_state(state, plane);
10880 
10881 			if (IS_ERR(new_plane_state)) {
10882 				ret = PTR_ERR(new_plane_state);
10883 				goto fail;
10884 			}
10885 		}
10886 	}
10887 
10888 	/* Remove existing planes if they are modified */
10889 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10890 		ret = dm_update_plane_state(dc, state, plane,
10891 					    old_plane_state,
10892 					    new_plane_state,
10893 					    false,
10894 					    &lock_and_validation_needed);
10895 		if (ret)
10896 			goto fail;
10897 	}
10898 
10899 	/* Disable all crtcs which require disable */
10900 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10901 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10902 					   old_crtc_state,
10903 					   new_crtc_state,
10904 					   false,
10905 					   &lock_and_validation_needed);
10906 		if (ret)
10907 			goto fail;
10908 	}
10909 
10910 	/* Enable all crtcs which require enable */
10911 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10912 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10913 					   old_crtc_state,
10914 					   new_crtc_state,
10915 					   true,
10916 					   &lock_and_validation_needed);
10917 		if (ret)
10918 			goto fail;
10919 	}
10920 
10921 	/* Add new/modified planes */
10922 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10923 		ret = dm_update_plane_state(dc, state, plane,
10924 					    old_plane_state,
10925 					    new_plane_state,
10926 					    true,
10927 					    &lock_and_validation_needed);
10928 		if (ret)
10929 			goto fail;
10930 	}
10931 
10932 	/* Run this here since we want to validate the streams we created */
10933 	ret = drm_atomic_helper_check_planes(dev, state);
10934 	if (ret)
10935 		goto fail;
10936 
10937 	/* Check cursor planes scaling */
10938 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10939 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10940 		if (ret)
10941 			goto fail;
10942 	}
10943 
10944 	if (state->legacy_cursor_update) {
10945 		/*
10946 		 * This is a fast cursor update coming from the plane update
10947 		 * helper, check if it can be done asynchronously for better
10948 		 * performance.
10949 		 */
10950 		state->async_update =
10951 			!drm_atomic_helper_async_check(dev, state);
10952 
10953 		/*
10954 		 * Skip the remaining global validation if this is an async
10955 		 * update. Cursor updates can be done without affecting
10956 		 * state or bandwidth calcs and this avoids the performance
10957 		 * penalty of locking the private state object and
10958 		 * allocating a new dc_state.
10959 		 */
10960 		if (state->async_update)
10961 			return 0;
10962 	}
10963 
10964 	/* Check scaling and underscan changes */
10965 	/* TODO Removed scaling changes validation due to inability to commit
10966 	 * a new stream into the context w/o causing a full reset. Need to
10967 	 * decide how to handle.
10968 	 */
10969 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10970 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10971 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10972 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10973 
10974 		/* Skip any modesets/resets */
10975 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10976 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10977 			continue;
10978 
10979 		/* Skip any thing not scale or underscan changes */
10980 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10981 			continue;
10982 
10983 		lock_and_validation_needed = true;
10984 	}
10985 
10986 #if defined(CONFIG_DRM_AMD_DC_DCN)
10987 	/* set the slot info for each mst_state based on the link encoding format */
10988 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10989 		struct amdgpu_dm_connector *aconnector;
10990 		struct drm_connector *connector;
10991 		struct drm_connector_list_iter iter;
10992 		u8 link_coding_cap;
10993 
10994 		if (!mgr->mst_state )
10995 			continue;
10996 
10997 		drm_connector_list_iter_begin(dev, &iter);
10998 		drm_for_each_connector_iter(connector, &iter) {
10999 			int id = connector->index;
11000 
11001 			if (id == mst_state->mgr->conn_base_id) {
11002 				aconnector = to_amdgpu_dm_connector(connector);
11003 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11004 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11005 
11006 				break;
11007 			}
11008 		}
11009 		drm_connector_list_iter_end(&iter);
11010 
11011 	}
11012 #endif
11013 	/*
11014 	 * Streams and planes are reset when there are changes that affect
11015 	 * bandwidth. Anything that affects bandwidth needs to go through
11016 	 * DC global validation to ensure that the configuration can be applied
11017 	 * to hardware.
11018 	 *
11019 	 * We have to currently stall out here in atomic_check for outstanding
11020 	 * commits to finish in this case because our IRQ handlers reference
11021 	 * DRM state directly - we can end up disabling interrupts too early
11022 	 * if we don't.
11023 	 *
11024 	 * TODO: Remove this stall and drop DM state private objects.
11025 	 */
11026 	if (lock_and_validation_needed) {
11027 		ret = dm_atomic_get_state(state, &dm_state);
11028 		if (ret)
11029 			goto fail;
11030 
11031 		ret = do_aquire_global_lock(dev, state);
11032 		if (ret)
11033 			goto fail;
11034 
11035 #if defined(CONFIG_DRM_AMD_DC_DCN)
11036 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11037 			ret = -EINVAL;
11038 			goto fail;
11039 		}
11038 
11039 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11040 		if (ret)
11041 			goto fail;
11042 #endif
11043 
11044 		/*
11045 		 * Perform validation of MST topology in the state:
11046 		 * We need to perform MST atomic check before calling
11047 		 * dc_validate_global_state(), or there is a chance
11048 		 * to get stuck in an infinite loop and hang eventually.
11049 		 */
11050 		ret = drm_dp_mst_atomic_check(state);
11051 		if (ret)
11052 			goto fail;
11053 		status = dc_validate_global_state(dc, dm_state->context, false);
11054 		if (status != DC_OK) {
11055 			drm_dbg_atomic(dev,
11056 				       "DC global validation failure: %s (%d)",
11057 				       dc_status_to_str(status), status);
11058 			ret = -EINVAL;
11059 			goto fail;
11060 		}
11061 	} else {
11062 		/*
11063 		 * The commit is a fast update. Fast updates shouldn't change
11064 		 * the DC context, affect global validation, and can have their
11065 		 * commit work done in parallel with other commits not touching
11066 		 * the same resource. If we have a new DC context as part of
11067 		 * the DM atomic state from validation we need to free it and
11068 		 * retain the existing one instead.
11069 		 *
11070 		 * Furthermore, since the DM atomic state only contains the DC
11071 		 * context and can safely be annulled, we can free the state
11072 		 * and clear the associated private object now to free
11073 		 * some memory and avoid a possible use-after-free later.
11074 		 */
11075 
11076 		for (i = 0; i < state->num_private_objs; i++) {
11077 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11078 
11079 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11080 				int j = state->num_private_objs-1;
11081 
11082 				dm_atomic_destroy_state(obj,
11083 						state->private_objs[i].state);
11084 
11085 				/* If i is not at the end of the array then the
11086 				 * last element needs to be moved to where i was
11087 				 * before the array can safely be truncated.
11088 				 */
11089 				if (i != j)
11090 					state->private_objs[i] =
11091 						state->private_objs[j];
11092 
11093 				state->private_objs[j].ptr = NULL;
11094 				state->private_objs[j].state = NULL;
11095 				state->private_objs[j].old_state = NULL;
11096 				state->private_objs[j].new_state = NULL;
11097 
11098 				state->num_private_objs = j;
11099 				break;
11100 			}
11101 		}
11102 	}
11103 
11104 	/* Store the overall update type for use later in atomic check. */
11105 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11106 		struct dm_crtc_state *dm_new_crtc_state =
11107 			to_dm_crtc_state(new_crtc_state);
11108 
11109 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11110 							 UPDATE_TYPE_FULL :
11111 							 UPDATE_TYPE_FAST;
11112 	}
11113 
11114 	/* Must be success */
11115 	WARN_ON(ret);
11116 
11117 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11118 
11119 	return ret;
11120 
11121 fail:
11122 	if (ret == -EDEADLK)
11123 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11124 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11125 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11126 	else
11127 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11128 
11129 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11130 
11131 	return ret;
11132 }
11133 
11134 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11135 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11136 {
11137 	uint8_t dpcd_data;
11138 	bool capable = false;
11139 
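	/*
	 * The MSA_TIMING_PAR_IGNORED bit in the DOWN_STREAM_PORT_COUNT DPCD
	 * register indicates that the sink can ignore the MSA timing
	 * parameters; the FreeSync path uses this to decide whether the EDID
	 * range-limits check is required.
	 */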
11140 	if (amdgpu_dm_connector->dc_link &&
11141 		dm_helpers_dp_read_dpcd(
11142 				NULL,
11143 				amdgpu_dm_connector->dc_link,
11144 				DP_DOWN_STREAM_PORT_COUNT,
11145 				&dpcd_data,
11146 				sizeof(dpcd_data))) {
11147 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11148 	}
11149 
11150 	return capable;
11151 }
11152 
11153 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11154 		unsigned int offset,
11155 		unsigned int total_length,
11156 		uint8_t *data,
11157 		unsigned int length,
11158 		struct amdgpu_hdmi_vsdb_info *vsdb)
11159 {
11160 	bool res;
11161 	union dmub_rb_cmd cmd;
11162 	struct dmub_cmd_send_edid_cea *input;
11163 	struct dmub_cmd_edid_cea_output *output;
11164 
11165 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11166 		return false;
11167 
11168 	memset(&cmd, 0, sizeof(cmd));
11169 
11170 	input = &cmd.edid_cea.data.input;
11171 
11172 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11173 	cmd.edid_cea.header.sub_type = 0;
11174 	cmd.edid_cea.header.payload_bytes =
11175 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11176 	input->offset = offset;
11177 	input->length = length;
11178 	input->total_length = total_length;
11179 	memcpy(input->payload, data, length);
11180 
11181 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11182 	if (!res) {
11183 		DRM_ERROR("EDID CEA parser failed\n");
11184 		return false;
11185 	}
11186 
11187 	output = &cmd.edid_cea.data.output;
11188 
11189 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11190 		if (!output->ack.success) {
11191 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11192 					output->ack.offset);
11193 		}
11194 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11195 		if (!output->amd_vsdb.vsdb_found)
11196 			return false;
11197 
11198 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11199 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11200 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11201 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11202 	} else {
11203 		DRM_WARN("Unknown EDID CEA parser results\n");
11204 		return false;
11205 	}
11206 
11207 	return true;
11208 }
11209 
11210 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11211 		uint8_t *edid_ext, int len,
11212 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11213 {
11214 	int i;
11215 
11216 	/* send extension block to DMCU for parsing */
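	/*
	 * For example, a 128-byte CEA extension block goes out as sixteen
	 * 8-byte chunks; every intermediate chunk is acked, and the AMD VSDB
	 * result only becomes available after the final chunk.
	 */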
11217 	for (i = 0; i < len; i += 8) {
11218 		bool res;
11219 		int offset;
11220 
11221 		/* send 8 bytes at a time */
11222 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11223 			return false;
11224 
11225 		if (i + 8 == len) {
11226 			/* whole EDID block has been sent, expect the result */
11227 			int version, min_rate, max_rate;
11228 
11229 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11230 			if (res) {
11231 				/* amd vsdb found */
11232 				vsdb_info->freesync_supported = 1;
11233 				vsdb_info->amd_vsdb_version = version;
11234 				vsdb_info->min_refresh_rate_hz = min_rate;
11235 				vsdb_info->max_refresh_rate_hz = max_rate;
11236 				return true;
11237 			}
11238 			/* not amd vsdb */
11239 			return false;
11240 		}
11241 
11242 		/* check for ack */
11243 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11244 		if (!res)
11245 			return false;
11246 	}
11247 
11248 	return false;
11249 }
11250 
11251 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11252 		uint8_t *edid_ext, int len,
11253 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11254 {
11255 	int i;
11256 
11257 	/* send extension block to DMUB for parsing */
11258 	for (i = 0; i < len; i += 8) {
11259 		/* send 8 bytes a time */
11260 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11261 			return false;
11262 	}
11263 
11264 	return vsdb_info->freesync_supported;
11265 }
11266 
11267 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11268 		uint8_t *edid_ext, int len,
11269 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11270 {
11271 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11272 
11273 	if (adev->dm.dmub_srv)
11274 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11275 	else
11276 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11277 }
11278 
11279 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11280 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11281 {
11282 	uint8_t *edid_ext = NULL;
11283 	int i;
11284 	bool valid_vsdb_found = false;
11285 
11286 	/*----- drm_find_cea_extension() -----*/
11287 	/* No EDID or EDID extensions */
11288 	if (edid == NULL || edid->extensions == 0)
11289 		return -ENODEV;
11290 
11291 	/* Find CEA extension */
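	/*
	 * Each EDID block is EDID_LENGTH (128) bytes, so extension block i
	 * starts at byte offset EDID_LENGTH * (i + 1) from the base block.
	 */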
11292 	for (i = 0; i < edid->extensions; i++) {
11293 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11294 		if (edid_ext[0] == CEA_EXT)
11295 			break;
11296 	}
11297 
11298 	if (i == edid->extensions)
11299 		return -ENODEV;
11300 
11301 	/*----- cea_db_offsets() -----*/
11302 	if (edid_ext[0] != CEA_EXT)
11303 		return -ENODEV;
11304 
11305 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11306 
11307 	return valid_vsdb_found ? i : -ENODEV;
11308 }
11309 
11310 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11311 					struct edid *edid)
11312 {
11313 	int i = 0;
11314 	struct detailed_timing *timing;
11315 	struct detailed_non_pixel *data;
11316 	struct detailed_data_monitor_range *range;
11317 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11318 			to_amdgpu_dm_connector(connector);
11319 	struct dm_connector_state *dm_con_state = NULL;
11320 	struct dc_sink *sink;
11321 
11322 	struct drm_device *dev = connector->dev;
11323 	struct amdgpu_device *adev = drm_to_adev(dev);
11324 	bool freesync_capable = false;
11325 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11326 
11327 	if (!connector->state) {
11328 		DRM_ERROR("%s - Connector has no state", __func__);
11329 		goto update;
11330 	}
11331 
11332 	sink = amdgpu_dm_connector->dc_sink ?
11333 		amdgpu_dm_connector->dc_sink :
11334 		amdgpu_dm_connector->dc_em_sink;
11335 
11336 	if (!edid || !sink) {
11337 		dm_con_state = to_dm_connector_state(connector->state);
11338 
11339 		amdgpu_dm_connector->min_vfreq = 0;
11340 		amdgpu_dm_connector->max_vfreq = 0;
11341 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11342 		connector->display_info.monitor_range.min_vfreq = 0;
11343 		connector->display_info.monitor_range.max_vfreq = 0;
11344 		freesync_capable = false;
11345 
11346 		goto update;
11347 	}
11348 
11349 	dm_con_state = to_dm_connector_state(connector->state);
11350 
11351 	if (!adev->dm.freesync_module)
11352 		goto update;
11353 
11354 
11355 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11356 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11357 		bool edid_check_required = false;
11358 
11359 		if (edid) {
11360 			edid_check_required = is_dp_capable_without_timing_msa(
11361 						adev->dm.dc,
11362 						amdgpu_dm_connector);
11363 		}
11364 
11365 		if (edid_check_required && (edid->version > 1 ||
11366 		   (edid->version == 1 && edid->revision > 1))) {
11367 			for (i = 0; i < 4; i++) {
11368 
11369 				timing	= &edid->detailed_timings[i];
11370 				data	= &timing->data.other_data;
11371 				range	= &data->data.range;
11372 				/*
11373 				 * Check if monitor has continuous frequency mode
11374 				 */
11375 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11376 					continue;
11377 				/*
11378 				 * Check for flag range limits only. If flag == 1 then
11379 				 * no additional timing information provided.
11380 				 * Default GTF, GTF Secondary curve and CVT are not
11381 				 * supported
11382 				 */
11383 				if (range->flags != 1)
11384 					continue;
11385 
11386 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11387 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
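				/*
				 * The EDID range-limits descriptor stores the max
				 * pixel clock in 10 MHz units, so e.g. a hypothetical
				 * raw value of 30 corresponds to 300 MHz.
				 */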
11388 				amdgpu_dm_connector->pixel_clock_mhz =
11389 					range->pixel_clock_mhz * 10;
11390 
11391 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11392 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11393 
11394 				break;
11395 			}
11396 
11397 			if (amdgpu_dm_connector->max_vfreq -
11398 			    amdgpu_dm_connector->min_vfreq > 10) {
11399 
11400 				freesync_capable = true;
11401 			}
11402 		}
11403 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11404 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11405 		if (i >= 0 && vsdb_info.freesync_supported) {
11406 			timing  = &edid->detailed_timings[i];
11407 			data    = &timing->data.other_data;
11408 
11409 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11410 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11411 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11412 				freesync_capable = true;
11413 
11414 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11415 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11416 		}
11417 	}
11418 
11419 update:
11420 	if (dm_con_state)
11421 		dm_con_state->freesync_capable = freesync_capable;
11422 
11423 	if (connector->vrr_capable_property)
11424 		drm_connector_set_vrr_capable_property(connector,
11425 						       freesync_capable);
11426 }
11427 
11428 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11429 {
11430 	struct amdgpu_device *adev = drm_to_adev(dev);
11431 	struct dc *dc = adev->dm.dc;
11432 	int i;
11433 
11434 	mutex_lock(&adev->dm.dc_lock);
11435 	if (dc->current_state) {
11436 		for (i = 0; i < dc->current_state->stream_count; ++i)
11437 			dc->current_state->streams[i]
11438 				->triggered_crtc_reset.enabled =
11439 				adev->dm.force_timing_sync;
11440 
11441 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11442 		dc_trigger_sync(dc, dc->current_state);
11443 	}
11444 	mutex_unlock(&adev->dm.dc_lock);
11445 }
11446 
11447 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11448 		       uint32_t value, const char *func_name)
11449 {
11450 #ifdef DM_CHECK_ADDR_0
11451 	if (address == 0) {
11452 		DC_ERR("invalid register write. address = 0");
11453 		return;
11454 	}
11455 #endif
11456 	cgs_write_register(ctx->cgs_device, address, value);
11457 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11458 }
11459 
11460 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11461 			  const char *func_name)
11462 {
11463 	uint32_t value;
11464 #ifdef DM_CHECK_ADDR_0
11465 	if (address == 0) {
11466 		DC_ERR("invalid register read; address = 0\n");
11467 		return 0;
11468 	}
11469 #endif
11470 
11471 	if (ctx->dmub_srv &&
11472 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11473 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11474 		ASSERT(false);
11475 		return 0;
11476 	}
11477 
11478 	value = cgs_read_register(ctx->cgs_device, address);
11479 
11480 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11481 
11482 	return value;
11483 }
11484 
11485 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11486 	uint8_t status_type, uint32_t *operation_result)
11487 {
11488 	struct amdgpu_device *adev = ctx->driver_context;
11489 	int return_status = -1;
11490 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11491 
11492 	if (is_cmd_aux) {
11493 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11494 			return_status = p_notify->aux_reply.length;
11495 			*operation_result = p_notify->result;
11496 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11497 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11498 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11499 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11500 		} else {
11501 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11502 		}
11503 	} else {
11504 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11505 			return_status = 0;
11506 			*operation_result = p_notify->sc_status;
11507 		} else {
11508 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11509 		}
11510 	}
11511 
11512 	return return_status;
11513 }
11514 
11515 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11516 	unsigned int link_index, void *cmd_payload, void *operation_result)
11517 {
11518 	struct amdgpu_device *adev = ctx->driver_context;
11519 	int ret = 0;
11520 
11521 	if (is_cmd_aux) {
11522 		dc_process_dmub_aux_transfer_async(ctx->dc,
11523 			link_index, (struct aux_payload *)cmd_payload);
11524 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11525 					(struct set_config_cmd_payload *)cmd_payload,
11526 					adev->dm.dmub_notify)) {
11527 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11528 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11529 					(uint32_t *)operation_result);
11530 	}
11531 
11532 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11533 	if (ret == 0) {
11534 		DRM_ERROR("wait_for_completion_timeout timeout!");
11535 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11536 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11537 				(uint32_t *)operation_result);
11538 	}
11539 
11540 	if (is_cmd_aux) {
11541 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11542 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11543 
11544 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11545 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11546 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11547 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11548 				       adev->dm.dmub_notify->aux_reply.length);
11549 			}
11550 		}
11551 	}
11552 
11553 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11554 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11555 			(uint32_t *)operation_result);
11556 }
11557