1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_fb_helper.h>
82 #include <drm/drm_fourcc.h>
83 #include <drm/drm_edid.h>
84 #include <drm/drm_vblank.h>
85 #include <drm/drm_audio_component.h>
86 
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "soc15_common.h"
93 #include "vega10_ip_offset.h"
96 
97 #include "gc/gc_11_0_0_offset.h"
98 #include "gc/gc_11_0_0_sh_mask.h"
99 
100 #include "modules/inc/mod_freesync.h"
101 #include "modules/power/power_helpers.h"
102 #include "modules/inc/mod_info_packet.h"
103 
104 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
106 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
108 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
110 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
112 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
114 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
116 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
117 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
118 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
119 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
120 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
121 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
122 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
123 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
124 
125 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
126 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
127 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
128 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
129 
130 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
131 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
132 
133 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
134 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
135 
136 /* Number of bytes in PSP header for firmware. */
137 #define PSP_HEADER_BYTES 0x100
138 
139 /* Number of bytes in PSP footer for firmware. */
140 #define PSP_FOOTER_BYTES 0x100
141 
142 /**
143  * DOC: overview
144  *
145  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
146  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
147  * requests into DC requests, and DC responses into DRM responses.
148  *
149  * The root control structure is &struct amdgpu_display_manager.
150  */
151 
152 /* basic init/fini API */
153 static int amdgpu_dm_init(struct amdgpu_device *adev);
154 static void amdgpu_dm_fini(struct amdgpu_device *adev);
155 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
156 
157 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
158 {
159 	switch (link->dpcd_caps.dongle_type) {
160 	case DISPLAY_DONGLE_NONE:
161 		return DRM_MODE_SUBCONNECTOR_Native;
162 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
163 		return DRM_MODE_SUBCONNECTOR_VGA;
164 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
165 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
166 		return DRM_MODE_SUBCONNECTOR_DVID;
167 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
168 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
169 		return DRM_MODE_SUBCONNECTOR_HDMIA;
170 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
171 	default:
172 		return DRM_MODE_SUBCONNECTOR_Unknown;
173 	}
174 }
175 
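/*
 * Update the connector's DP subconnector property based on the dongle type of
 * the attached sink (Unknown when no sink is present).
 */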
176 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
177 {
178 	struct dc_link *link = aconnector->dc_link;
179 	struct drm_connector *connector = &aconnector->base;
180 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
181 
182 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
183 		return;
184 
185 	if (aconnector->dc_sink)
186 		subconnector = get_subconnector_type(link);
187 
188 	drm_object_property_set_value(&connector->base,
189 			connector->dev->mode_config.dp_subconnector_property,
190 			subconnector);
191 }
192 
193 /*
194  * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
196  * drm_encoder, drm_mode_config
197  *
198  * Returns 0 on success
199  */
200 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
201 /* removes and deallocates the drm structures, created by the above function */
202 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
203 
204 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
205 				struct drm_plane *plane,
206 				unsigned long possible_crtcs,
207 				const struct dc_plane_cap *plane_cap);
208 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
209 			       struct drm_plane *plane,
210 			       uint32_t link_index);
211 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
212 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
213 				    uint32_t link_index,
214 				    struct amdgpu_encoder *amdgpu_encoder);
215 static int amdgpu_dm_encoder_init(struct drm_device *dev,
216 				  struct amdgpu_encoder *aencoder,
217 				  uint32_t link_index);
218 
219 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
220 
221 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
222 
223 static int amdgpu_dm_atomic_check(struct drm_device *dev,
224 				  struct drm_atomic_state *state);
225 
226 static void handle_cursor_update(struct drm_plane *plane,
227 				 struct drm_plane_state *old_plane_state);
228 
229 static const struct drm_format_info *
230 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
231 
232 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
233 static void handle_hpd_rx_irq(void *param);
234 
235 static bool
236 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
237 				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: counter for vertical blanks
 */
251 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
252 {
253 	if (crtc >= adev->mode_info.num_crtc)
254 		return 0;
255 	else {
256 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
257 
258 		if (acrtc->dm_irq_params.stream == NULL) {
259 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
260 				  crtc);
261 			return 0;
262 		}
263 
264 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
265 	}
266 }
267 
268 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
269 				  u32 *vbl, u32 *position)
270 {
271 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
272 
273 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
274 		return -EINVAL;
275 	else {
276 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
277 
278 		if (acrtc->dm_irq_params.stream ==  NULL) {
279 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
280 				  crtc);
281 			return 0;
282 		}
283 
284 		/*
285 		 * TODO rework base driver to use values directly.
286 		 * for now parse it back into reg-format
287 		 */
288 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
289 					 &v_blank_start,
290 					 &v_blank_end,
291 					 &h_position,
292 					 &v_position);
293 
294 		*position = v_position | (h_position << 16);
295 		*vbl = v_blank_start | (v_blank_end << 16);
296 	}
297 
298 	return 0;
299 }
300 
301 static bool dm_is_idle(void *handle)
302 {
303 	/* XXX todo */
304 	return true;
305 }
306 
307 static int dm_wait_for_idle(void *handle)
308 {
309 	/* XXX todo */
310 	return 0;
311 }
312 
313 static bool dm_check_soft_reset(void *handle)
314 {
315 	return false;
316 }
317 
318 static int dm_soft_reset(void *handle)
319 {
320 	/* XXX todo */
321 	return 0;
322 }
323 
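/*
 * Look up the amdgpu_crtc whose OTG instance matches otg_inst. Falls back to
 * the first CRTC (with a warning) when otg_inst is -1.
 */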
324 static struct amdgpu_crtc *
325 get_crtc_by_otg_inst(struct amdgpu_device *adev,
326 		     int otg_inst)
327 {
328 	struct drm_device *dev = adev_to_drm(adev);
329 	struct drm_crtc *crtc;
330 	struct amdgpu_crtc *amdgpu_crtc;
331 
332 	if (WARN_ON(otg_inst == -1))
333 		return adev->mode_info.crtcs[0];
334 
335 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
336 		amdgpu_crtc = to_amdgpu_crtc(crtc);
337 
338 		if (amdgpu_crtc->otg_inst == otg_inst)
339 			return amdgpu_crtc;
340 	}
341 
342 	return NULL;
343 }
344 
345 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
346 {
347 	return acrtc->dm_irq_params.freesync_config.state ==
348 		       VRR_STATE_ACTIVE_VARIABLE ||
349 	       acrtc->dm_irq_params.freesync_config.state ==
350 		       VRR_STATE_ACTIVE_FIXED;
351 }
352 
353 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
354 {
355 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
356 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
357 }
358 
359 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
360 					      struct dm_crtc_state *new_state)
361 {
362 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
363 		return true;
364 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
365 		return true;
366 	else
367 		return false;
368 }
369 
370 /**
371  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the pflip CRTC instance
373  *
374  * Handles the pageflip interrupt by notifying all interested parties
375  * that the pageflip has been completed.
376  */
377 static void dm_pflip_high_irq(void *interrupt_params)
378 {
379 	struct amdgpu_crtc *amdgpu_crtc;
380 	struct common_irq_params *irq_params = interrupt_params;
381 	struct amdgpu_device *adev = irq_params->adev;
382 	unsigned long flags;
383 	struct drm_pending_vblank_event *e;
384 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
385 	bool vrr_active;
386 
387 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
388 
389 	/* IRQ could occur when in initial stage */
390 	/* TODO work and BO cleanup */
391 	if (amdgpu_crtc == NULL) {
392 		DC_LOG_PFLIP("CRTC is null, returning.\n");
393 		return;
394 	}
395 
396 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
397 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
400 						 amdgpu_crtc->pflip_status,
401 						 AMDGPU_FLIP_SUBMITTED,
402 						 amdgpu_crtc->crtc_id,
403 						 amdgpu_crtc);
404 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
405 		return;
406 	}
407 
408 	/* page flip completed. */
409 	e = amdgpu_crtc->event;
410 	amdgpu_crtc->event = NULL;
411 
412 	WARN_ON(!e);
413 
414 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
415 
416 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
417 	if (!vrr_active ||
418 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
419 				      &v_blank_end, &hpos, &vpos) ||
420 	    (vpos < v_blank_start)) {
421 		/* Update to correct count and vblank timestamp if racing with
422 		 * vblank irq. This also updates to the correct vblank timestamp
423 		 * even in VRR mode, as scanout is past the front-porch atm.
424 		 */
425 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
426 
427 		/* Wake up userspace by sending the pageflip event with proper
428 		 * count and timestamp of vblank of flip completion.
429 		 */
430 		if (e) {
431 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
432 
433 			/* Event sent, so done with vblank for this flip */
434 			drm_crtc_vblank_put(&amdgpu_crtc->base);
435 		}
436 	} else if (e) {
437 		/* VRR active and inside front-porch: vblank count and
438 		 * timestamp for pageflip event will only be up to date after
439 		 * drm_crtc_handle_vblank() has been executed from late vblank
440 		 * irq handler after start of back-porch (vline 0). We queue the
441 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
442 		 * updated timestamp and count, once it runs after us.
443 		 *
444 		 * We need to open-code this instead of using the helper
445 		 * drm_crtc_arm_vblank_event(), as that helper would
446 		 * call drm_crtc_accurate_vblank_count(), which we must
447 		 * not call in VRR mode while we are in front-porch!
448 		 */
449 
450 		/* sequence will be replaced by real count during send-out. */
451 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
452 		e->pipe = amdgpu_crtc->crtc_id;
453 
454 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
455 		e = NULL;
456 	}
457 
458 	/* Keep track of vblank of this flip for flip throttling. We use the
459 	 * cooked hw counter, as that one incremented at start of this vblank
460 	 * of pageflip completion, so last_flip_vblank is the forbidden count
461 	 * for queueing new pageflips if vsync + VRR is enabled.
462 	 */
463 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
464 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
465 
466 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
467 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
468 
469 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
470 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
471 		     vrr_active, (int) !e);
472 }
473 
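/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Tracks the measured refresh rate for tracing and, when VRR is active,
 * performs core vblank handling and BTR processing (on pre-DCE12 ASICs)
 * after the end of the front-porch.
 */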
474 static void dm_vupdate_high_irq(void *interrupt_params)
475 {
476 	struct common_irq_params *irq_params = interrupt_params;
477 	struct amdgpu_device *adev = irq_params->adev;
478 	struct amdgpu_crtc *acrtc;
479 	struct drm_device *drm_dev;
480 	struct drm_vblank_crtc *vblank;
481 	ktime_t frame_duration_ns, previous_timestamp;
482 	unsigned long flags;
483 	int vrr_active;
484 
485 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
486 
487 	if (acrtc) {
488 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
489 		drm_dev = acrtc->base.dev;
490 		vblank = &drm_dev->vblank[acrtc->base.index];
491 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
492 		frame_duration_ns = vblank->time - previous_timestamp;
493 
494 		if (frame_duration_ns > 0) {
495 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
496 						frame_duration_ns,
497 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
498 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
499 		}
500 
501 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
502 			      acrtc->crtc_id,
503 			      vrr_active);
504 
		/* Core vblank handling is done here after the end of front-porch in
		 * vrr mode, as vblank timestamping only gives valid results
		 * now that scanout is past the front-porch. This will also deliver
508 		 * page-flip completion events that have been queued to us
509 		 * if a pageflip happened inside front-porch.
510 		 */
511 		if (vrr_active) {
512 			drm_crtc_handle_vblank(&acrtc->base);
513 
514 			/* BTR processing for pre-DCE12 ASICs */
515 			if (acrtc->dm_irq_params.stream &&
516 			    adev->family < AMDGPU_FAMILY_AI) {
517 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
518 				mod_freesync_handle_v_update(
519 				    adev->dm.freesync_module,
520 				    acrtc->dm_irq_params.stream,
521 				    &acrtc->dm_irq_params.vrr_params);
522 
523 				dc_stream_adjust_vmin_vmax(
524 				    adev->dm.dc,
525 				    acrtc->dm_irq_params.stream,
526 				    &acrtc->dm_irq_params.vrr_params.adjust);
527 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
528 			}
529 		}
530 	}
531 }
532 
533 /**
534  * dm_crtc_high_irq() - Handles CRTC interrupt
535  * @interrupt_params: used for determining the CRTC instance
536  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
538  * event handler.
539  */
540 static void dm_crtc_high_irq(void *interrupt_params)
541 {
542 	struct common_irq_params *irq_params = interrupt_params;
543 	struct amdgpu_device *adev = irq_params->adev;
544 	struct amdgpu_crtc *acrtc;
545 	unsigned long flags;
546 	int vrr_active;
547 
548 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
549 	if (!acrtc)
550 		return;
551 
552 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
553 
554 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
555 		      vrr_active, acrtc->dm_irq_params.active_planes);
556 
557 	/**
558 	 * Core vblank handling at start of front-porch is only possible
559 	 * in non-vrr mode, as only there vblank timestamping will give
560 	 * valid results while done in front-porch. Otherwise defer it
561 	 * to dm_vupdate_high_irq after end of front-porch.
562 	 */
563 	if (!vrr_active)
564 		drm_crtc_handle_vblank(&acrtc->base);
565 
566 	/**
567 	 * Following stuff must happen at start of vblank, for crc
568 	 * computation and below-the-range btr support in vrr mode.
569 	 */
570 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
571 
572 	/* BTR updates need to happen before VUPDATE on Vega and above. */
573 	if (adev->family < AMDGPU_FAMILY_AI)
574 		return;
575 
576 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
577 
578 	if (acrtc->dm_irq_params.stream &&
579 	    acrtc->dm_irq_params.vrr_params.supported &&
580 	    acrtc->dm_irq_params.freesync_config.state ==
581 		    VRR_STATE_ACTIVE_VARIABLE) {
582 		mod_freesync_handle_v_update(adev->dm.freesync_module,
583 					     acrtc->dm_irq_params.stream,
584 					     &acrtc->dm_irq_params.vrr_params);
585 
586 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
587 					   &acrtc->dm_irq_params.vrr_params.adjust);
588 	}
589 
590 	/*
591 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
592 	 * In that case, pageflip completion interrupts won't fire and pageflip
593 	 * completion events won't get delivered. Prevent this by sending
594 	 * pending pageflip events from here if a flip is still pending.
595 	 *
596 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
597 	 * avoid race conditions between flip programming and completion,
598 	 * which could cause too early flip completion events.
599 	 */
600 	if (adev->family >= AMDGPU_FAMILY_RV &&
601 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
602 	    acrtc->dm_irq_params.active_planes == 0) {
603 		if (acrtc->event) {
604 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
605 			acrtc->event = NULL;
606 			drm_crtc_vblank_put(&acrtc->base);
607 		}
608 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
609 	}
610 
611 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
612 }
613 
614 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
615 /**
616  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
617  * DCN generation ASICs
618  * @interrupt_params: interrupt parameters
619  *
620  * Used to set crc window/read out crc value at vertical line 0 position
621  */
622 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
623 {
624 	struct common_irq_params *irq_params = interrupt_params;
625 	struct amdgpu_device *adev = irq_params->adev;
626 	struct amdgpu_crtc *acrtc;
627 
628 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
629 
630 	if (!acrtc)
631 		return;
632 
633 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
634 }
635 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
636 
637 /**
638  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
639  * @adev: amdgpu_device pointer
640  * @notify: dmub notification structure
641  *
 * DMUB AUX or SET_CONFIG command completion processing callback.
 * Copies the DMUB notification to DM so it can be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
645  */
646 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
647 					struct dmub_notification *notify)
648 {
649 	if (adev->dm.dmub_notify)
650 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
651 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
652 		complete(&adev->dm.dmub_aux_transfer_done);
653 }
654 
655 /**
656  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
657  * @adev: amdgpu_device pointer
658  * @notify: dmub notification structure
659  *
 * DMUB HPD interrupt processing callback. Gets the display index through the
 * link index and calls the helper to do the processing.
662  */
663 static void dmub_hpd_callback(struct amdgpu_device *adev,
664 			      struct dmub_notification *notify)
665 {
666 	struct amdgpu_dm_connector *aconnector;
667 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
668 	struct drm_connector *connector;
669 	struct drm_connector_list_iter iter;
670 	struct dc_link *link;
671 	uint8_t link_index = 0;
672 	struct drm_device *dev;
673 
674 	if (adev == NULL)
675 		return;
676 
677 	if (notify == NULL) {
678 		DRM_ERROR("DMUB HPD callback notification was NULL");
679 		return;
680 	}
681 
682 	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
684 		return;
685 	}
686 
687 	link_index = notify->link_index;
688 	link = adev->dm.dc->links[link_index];
689 	dev = adev->dm.ddev;
690 
691 	drm_connector_list_iter_begin(dev, &iter);
692 	drm_for_each_connector_iter(connector, &iter) {
693 		aconnector = to_amdgpu_dm_connector(connector);
694 		if (link && aconnector->dc_link == link) {
695 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
696 			hpd_aconnector = aconnector;
697 			break;
698 		}
699 	}
700 	drm_connector_list_iter_end(&iter);
701 
702 	if (hpd_aconnector) {
703 		if (notify->type == DMUB_NOTIFICATION_HPD)
704 			handle_hpd_irq_helper(hpd_aconnector);
705 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
706 			handle_hpd_rx_irq(hpd_aconnector);
707 	}
708 }
709 
710 /**
711  * register_dmub_notify_callback - Sets callback for DMUB notify
712  * @adev: amdgpu_device pointer
713  * @type: Type of dmub notification
714  * @callback: Dmub interrupt callback function
715  * @dmub_int_thread_offload: offload indicator
716  *
 * API to register a DMUB callback handler for a DMUB notification.
 * Also sets an indicator for whether callback processing is to be offloaded
 * to the DMUB interrupt handling thread.
 *
 * Return: true if successfully registered, false if the callback is NULL or
 * the notification type is invalid
721  */
722 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
723 					  enum dmub_notification_type type,
724 					  dmub_notify_interrupt_callback_t callback,
725 					  bool dmub_int_thread_offload)
726 {
727 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
728 		adev->dm.dmub_callback[type] = callback;
729 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
730 	} else
731 		return false;
732 
733 	return true;
734 }
735 
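/*
 * Deferred work handler that runs the registered DMUB notification callback
 * outside of interrupt context and frees the work item afterwards.
 */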
736 static void dm_handle_hpd_work(struct work_struct *work)
737 {
738 	struct dmub_hpd_work *dmub_hpd_wrk;
739 
740 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
741 
742 	if (!dmub_hpd_wrk->dmub_notify) {
743 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
744 		return;
745 	}
746 
747 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
748 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
749 		dmub_hpd_wrk->dmub_notify);
750 	}
751 
752 	kfree(dmub_hpd_wrk->dmub_notify);
753 	kfree(dmub_hpd_wrk);
754 
755 }
756 
757 #define DMUB_TRACE_MAX_READ 64
758 /**
759  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
760  * @interrupt_params: used for determining the Outbox instance
761  *
 * Handles the Outbox interrupt by processing pending DMUB notifications and
 * reading entries from the DMCUB trace buffer.
764  */
765 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
766 {
767 	struct dmub_notification notify;
768 	struct common_irq_params *irq_params = interrupt_params;
769 	struct amdgpu_device *adev = irq_params->adev;
770 	struct amdgpu_display_manager *dm = &adev->dm;
771 	struct dmcub_trace_buf_entry entry = { 0 };
772 	uint32_t count = 0;
773 	struct dmub_hpd_work *dmub_hpd_wrk;
774 	struct dc_link *plink = NULL;
775 
776 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
777 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
778 
779 		do {
780 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
781 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
782 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
783 				continue;
784 			}
785 			if (!dm->dmub_callback[notify.type]) {
786 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
787 				continue;
788 			}
789 			if (dm->dmub_thread_offload[notify.type] == true) {
790 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
791 				if (!dmub_hpd_wrk) {
792 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
793 					return;
794 				}
795 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
796 				if (!dmub_hpd_wrk->dmub_notify) {
797 					kfree(dmub_hpd_wrk);
798 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
799 					return;
800 				}
801 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
802 				if (dmub_hpd_wrk->dmub_notify)
803 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
804 				dmub_hpd_wrk->adev = adev;
805 				if (notify.type == DMUB_NOTIFICATION_HPD) {
806 					plink = adev->dm.dc->links[notify.link_index];
807 					if (plink) {
808 						plink->hpd_status =
809 							notify.hpd_status == DP_HPD_PLUG;
810 					}
811 				}
812 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
813 			} else {
814 				dm->dmub_callback[notify.type](adev, &notify);
815 			}
816 		} while (notify.pending_notification);
817 	}
818 
819 
820 	do {
821 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
822 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
823 							entry.param0, entry.param1);
824 
825 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
826 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
827 		} else
828 			break;
829 
830 		count++;
831 
832 	} while (count <= DMUB_TRACE_MAX_READ);
833 
834 	if (count > DMUB_TRACE_MAX_READ)
835 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
836 }
837 
838 static int dm_set_clockgating_state(void *handle,
839 		  enum amd_clockgating_state state)
840 {
841 	return 0;
842 }
843 
844 static int dm_set_powergating_state(void *handle,
845 		  enum amd_powergating_state state)
846 {
847 	return 0;
848 }
849 
850 /* Prototypes of private functions */
static int dm_early_init(void *handle);
852 
853 /* Allocate memory for FBC compressed data  */
854 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
855 {
856 	struct drm_device *dev = connector->dev;
857 	struct amdgpu_device *adev = drm_to_adev(dev);
858 	struct dm_compressor_info *compressor = &adev->dm.compressor;
859 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
860 	struct drm_display_mode *mode;
861 	unsigned long max_size = 0;
862 
863 	if (adev->dm.dc->fbc_compressor == NULL)
864 		return;
865 
866 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
867 		return;
868 
869 	if (compressor->bo_ptr)
870 		return;
871 
872 
873 	list_for_each_entry(mode, &connector->modes, head) {
874 		if (max_size < mode->htotal * mode->vtotal)
875 			max_size = mode->htotal * mode->vtotal;
876 	}
877 
878 	if (max_size) {
879 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
880 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
881 			    &compressor->gpu_addr, &compressor->cpu_addr);
882 
883 		if (r)
884 			DRM_ERROR("DM: Failed to initialize FBC\n");
885 		else {
886 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
887 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
888 		}
889 
890 	}
891 
892 }
893 
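/*
 * .get_eld callback for the audio component: copies the ELD of the connector
 * bound to the requested audio pin and reports whether the pin is enabled.
 */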
894 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
895 					  int pipe, bool *enabled,
896 					  unsigned char *buf, int max_bytes)
897 {
898 	struct drm_device *dev = dev_get_drvdata(kdev);
899 	struct amdgpu_device *adev = drm_to_adev(dev);
900 	struct drm_connector *connector;
901 	struct drm_connector_list_iter conn_iter;
902 	struct amdgpu_dm_connector *aconnector;
903 	int ret = 0;
904 
905 	*enabled = false;
906 
907 	mutex_lock(&adev->dm.audio_lock);
908 
909 	drm_connector_list_iter_begin(dev, &conn_iter);
910 	drm_for_each_connector_iter(connector, &conn_iter) {
911 		aconnector = to_amdgpu_dm_connector(connector);
912 		if (aconnector->audio_inst != port)
913 			continue;
914 
915 		*enabled = true;
916 		ret = drm_eld_size(connector->eld);
917 		memcpy(buf, connector->eld, min(max_bytes, ret));
918 
919 		break;
920 	}
921 	drm_connector_list_iter_end(&conn_iter);
922 
923 	mutex_unlock(&adev->dm.audio_lock);
924 
925 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
926 
927 	return ret;
928 }
929 
930 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
931 	.get_eld = amdgpu_dm_audio_component_get_eld,
932 };
933 
934 static int amdgpu_dm_audio_component_bind(struct device *kdev,
935 				       struct device *hda_kdev, void *data)
936 {
937 	struct drm_device *dev = dev_get_drvdata(kdev);
938 	struct amdgpu_device *adev = drm_to_adev(dev);
939 	struct drm_audio_component *acomp = data;
940 
941 	acomp->ops = &amdgpu_dm_audio_component_ops;
942 	acomp->dev = kdev;
943 	adev->dm.audio_component = acomp;
944 
945 	return 0;
946 }
947 
948 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
949 					  struct device *hda_kdev, void *data)
950 {
951 	struct drm_device *dev = dev_get_drvdata(kdev);
952 	struct amdgpu_device *adev = drm_to_adev(dev);
953 	struct drm_audio_component *acomp = data;
954 
955 	acomp->ops = NULL;
956 	acomp->dev = NULL;
957 	adev->dm.audio_component = NULL;
958 }
959 
960 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
961 	.bind	= amdgpu_dm_audio_component_bind,
962 	.unbind	= amdgpu_dm_audio_component_unbind,
963 };
964 
965 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
966 {
967 	int i, ret;
968 
969 	if (!amdgpu_audio)
970 		return 0;
971 
972 	adev->mode_info.audio.enabled = true;
973 
974 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
975 
976 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
977 		adev->mode_info.audio.pin[i].channels = -1;
978 		adev->mode_info.audio.pin[i].rate = -1;
979 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
980 		adev->mode_info.audio.pin[i].status_bits = 0;
981 		adev->mode_info.audio.pin[i].category_code = 0;
982 		adev->mode_info.audio.pin[i].connected = false;
983 		adev->mode_info.audio.pin[i].id =
984 			adev->dm.dc->res_pool->audios[i]->inst;
985 		adev->mode_info.audio.pin[i].offset = 0;
986 	}
987 
988 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
989 	if (ret < 0)
990 		return ret;
991 
992 	adev->dm.audio_registered = true;
993 
994 	return 0;
995 }
996 
997 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
998 {
999 	if (!amdgpu_audio)
1000 		return;
1001 
1002 	if (!adev->mode_info.audio.enabled)
1003 		return;
1004 
1005 	if (adev->dm.audio_registered) {
1006 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1007 		adev->dm.audio_registered = false;
1008 	}
1009 
1010 	/* TODO: Disable audio? */
1011 
1012 	adev->mode_info.audio.enabled = false;
1013 }
1014 
1015 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1016 {
1017 	struct drm_audio_component *acomp = adev->dm.audio_component;
1018 
1019 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1020 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1021 
1022 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1023 						 pin, -1);
1024 	}
1025 }
1026 
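/**
 * dm_dmub_hw_init() - Initialize DMCUB hardware and load the DMUB firmware
 * @adev: amdgpu_device pointer
 *
 * Copies the firmware instruction/data, BSS data and VBIOS regions into the
 * DMUB framebuffer memory, programs the hardware parameters and waits for
 * the firmware auto-load to complete.
 *
 * Return: 0 on success (or when DMUB is not supported on the ASIC),
 * negative error code on failure.
 */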
1027 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1028 {
1029 	const struct dmcub_firmware_header_v1_0 *hdr;
1030 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1031 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1032 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1033 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1034 	struct abm *abm = adev->dm.dc->res_pool->abm;
1035 	struct dmub_srv_hw_params hw_params;
1036 	enum dmub_status status;
1037 	const unsigned char *fw_inst_const, *fw_bss_data;
1038 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1039 	bool has_hw_support;
1040 
1041 	if (!dmub_srv)
1042 		/* DMUB isn't supported on the ASIC. */
1043 		return 0;
1044 
1045 	if (!fb_info) {
1046 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1047 		return -EINVAL;
1048 	}
1049 
1050 	if (!dmub_fw) {
1051 		/* Firmware required for DMUB support. */
1052 		DRM_ERROR("No firmware provided for DMUB.\n");
1053 		return -EINVAL;
1054 	}
1055 
1056 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1057 	if (status != DMUB_STATUS_OK) {
1058 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1059 		return -EINVAL;
1060 	}
1061 
1062 	if (!has_hw_support) {
1063 		DRM_INFO("DMUB unsupported on ASIC\n");
1064 		return 0;
1065 	}
1066 
1067 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1068 	status = dmub_srv_hw_reset(dmub_srv);
1069 	if (status != DMUB_STATUS_OK)
1070 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1071 
1072 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1073 
1074 	fw_inst_const = dmub_fw->data +
1075 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1076 			PSP_HEADER_BYTES;
1077 
1078 	fw_bss_data = dmub_fw->data +
1079 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1080 		      le32_to_cpu(hdr->inst_const_bytes);
1081 
1082 	/* Copy firmware and bios info into FB memory. */
1083 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1084 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1085 
1086 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1087 
1088 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1089 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1090 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1091 	 * will be done by dm_dmub_hw_init
1092 	 */
1093 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1094 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1095 				fw_inst_const_size);
1096 	}
1097 
1098 	if (fw_bss_data_size)
1099 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1100 		       fw_bss_data, fw_bss_data_size);
1101 
1102 	/* Copy firmware bios info into FB memory. */
1103 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1104 	       adev->bios_size);
1105 
1106 	/* Reset regions that need to be reset. */
1107 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1108 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1109 
1110 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1111 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1112 
1113 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1114 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1115 
1116 	/* Initialize hardware. */
1117 	memset(&hw_params, 0, sizeof(hw_params));
1118 	hw_params.fb_base = adev->gmc.fb_start;
1119 	hw_params.fb_offset = adev->gmc.aper_base;
1120 
1121 	/* backdoor load firmware and trigger dmub running */
1122 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1123 		hw_params.load_inst_const = true;
1124 
1125 	if (dmcu)
1126 		hw_params.psp_version = dmcu->psp_version;
1127 
1128 	for (i = 0; i < fb_info->num_fb; ++i)
1129 		hw_params.fb[i] = &fb_info->fb[i];
1130 
1131 	switch (adev->ip_versions[DCE_HWIP][0]) {
1132 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1133 		hw_params.dpia_supported = true;
1134 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1135 		break;
1136 	default:
1137 		break;
1138 	}
1139 
1140 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1141 	if (status != DMUB_STATUS_OK) {
1142 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1143 		return -EINVAL;
1144 	}
1145 
1146 	/* Wait for firmware load to finish. */
1147 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1148 	if (status != DMUB_STATUS_OK)
1149 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1150 
1151 	/* Init DMCU and ABM if available. */
1152 	if (dmcu && abm) {
1153 		dmcu->funcs->dmcu_init(dmcu);
1154 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1155 	}
1156 
1157 	if (!adev->dm.dc->ctx->dmub_srv)
1158 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1159 	if (!adev->dm.dc->ctx->dmub_srv) {
1160 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1161 		return -ENOMEM;
1162 	}
1163 
1164 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1165 		 adev->dm.dmcub_fw_version);
1166 
1167 	return 0;
1168 }
1169 
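/*
 * Restore DMUB after suspend: if the firmware is still marked as initialized,
 * only wait for the auto-load to finish; otherwise redo the full hardware init.
 */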
1170 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1171 {
1172 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1173 	enum dmub_status status;
1174 	bool init;
1175 
1176 	if (!dmub_srv) {
1177 		/* DMUB isn't supported on the ASIC. */
1178 		return;
1179 	}
1180 
1181 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1182 	if (status != DMUB_STATUS_OK)
1183 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1184 
1185 	if (status == DMUB_STATUS_OK && init) {
1186 		/* Wait for firmware load to finish. */
1187 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1188 		if (status != DMUB_STATUS_OK)
1189 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1190 	} else {
1191 		/* Perform the full hardware initialization. */
1192 		dm_dmub_hw_init(adev);
1193 	}
1194 }
1195 
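/*
 * Translate the GMC memory layout (system aperture, AGP window and GART page
 * table addresses) into the DC physical address space configuration.
 */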
1196 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1197 {
1198 	uint64_t pt_base;
1199 	uint32_t logical_addr_low;
1200 	uint32_t logical_addr_high;
1201 	uint32_t agp_base, agp_bot, agp_top;
1202 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1203 
1204 	memset(pa_config, 0, sizeof(*pa_config));
1205 
1206 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1207 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1208 
1209 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1210 		/*
1211 		 * Raven2 has a HW issue that it is unable to use the vram which
1212 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1213 		 * workaround that increase system aperture high address (add 1)
1214 		 * to get rid of the VM fault and hardware hang.
1215 		 */
1216 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1217 	else
1218 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1219 
1220 	agp_base = 0;
1221 	agp_bot = adev->gmc.agp_start >> 24;
1222 	agp_top = adev->gmc.agp_end >> 24;
1223 
1224 
1225 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1226 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1227 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1228 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1229 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1230 	page_table_base.low_part = lower_32_bits(pt_base);
1231 
1232 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1233 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1234 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1236 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1237 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1238 
1239 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1240 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1241 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1242 
1243 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1244 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1245 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1246 
1247 	pa_config->is_hvm_enabled = 0;
1248 
1249 }
1250 
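/*
 * Deferred vblank work: tracks how many CRTCs have vblank interrupts enabled,
 * allows idle (MALL) optimizations only when that count drops to zero, and
 * enables/disables PSR based on the OS vblank requirements for the stream.
 */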
1251 static void vblank_control_worker(struct work_struct *work)
1252 {
1253 	struct vblank_control_work *vblank_work =
1254 		container_of(work, struct vblank_control_work, work);
1255 	struct amdgpu_display_manager *dm = vblank_work->dm;
1256 
1257 	mutex_lock(&dm->dc_lock);
1258 
1259 	if (vblank_work->enable)
1260 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1262 		dm->active_vblank_irq_count--;
1263 
1264 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1265 
1266 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1267 
1268 	/* Control PSR based on vblank requirements from OS */
1269 	if (vblank_work->stream && vblank_work->stream->link) {
1270 		if (vblank_work->enable) {
1271 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1272 				amdgpu_dm_psr_disable(vblank_work->stream);
1273 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1274 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1275 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1276 			amdgpu_dm_psr_enable(vblank_work->stream);
1277 		}
1278 	}
1279 
1280 	mutex_unlock(&dm->dc_lock);
1281 
1282 	dc_stream_release(vblank_work->stream);
1283 
1284 	kfree(vblank_work);
1285 }
1286 
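/*
 * Offloaded HPD RX IRQ work: re-detects the sink and, unless the device is in
 * reset or nothing is connected, handles automated test requests or link loss
 * for the DP link.
 */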
1287 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1288 {
1289 	struct hpd_rx_irq_offload_work *offload_work;
1290 	struct amdgpu_dm_connector *aconnector;
1291 	struct dc_link *dc_link;
1292 	struct amdgpu_device *adev;
1293 	enum dc_connection_type new_connection_type = dc_connection_none;
1294 	unsigned long flags;
1295 
1296 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1297 	aconnector = offload_work->offload_wq->aconnector;
1298 
1299 	if (!aconnector) {
1300 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1301 		goto skip;
1302 	}
1303 
1304 	adev = drm_to_adev(aconnector->base.dev);
1305 	dc_link = aconnector->dc_link;
1306 
1307 	mutex_lock(&aconnector->hpd_lock);
1308 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1309 		DRM_ERROR("KMS: Failed to detect connector\n");
1310 	mutex_unlock(&aconnector->hpd_lock);
1311 
1312 	if (new_connection_type == dc_connection_none)
1313 		goto skip;
1314 
1315 	if (amdgpu_in_reset(adev))
1316 		goto skip;
1317 
1318 	mutex_lock(&adev->dm.dc_lock);
1319 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1320 		dc_link_dp_handle_automated_test(dc_link);
1321 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1322 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1323 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1324 		dc_link_dp_handle_link_loss(dc_link);
1325 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1326 		offload_work->offload_wq->is_handling_link_loss = false;
1327 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1328 	}
1329 	mutex_unlock(&adev->dm.dc_lock);
1330 
1331 skip:
1332 	kfree(offload_work);
1333 
1334 }
1335 
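/*
 * Allocate one single-threaded offload workqueue per link for deferred
 * HPD RX IRQ handling.
 */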
1336 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1337 {
1338 	int max_caps = dc->caps.max_links;
1339 	int i = 0;
1340 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1341 
1342 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1343 
1344 	if (!hpd_rx_offload_wq)
1345 		return NULL;
1346 
1347 
1348 	for (i = 0; i < max_caps; i++) {
1349 		hpd_rx_offload_wq[i].wq =
1350 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1351 
1352 		if (hpd_rx_offload_wq[i].wq == NULL) {
1353 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1354 			return NULL;
1355 		}
1356 
1357 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1358 	}
1359 
1360 	return hpd_rx_offload_wq;
1361 }
1362 
1363 struct amdgpu_stutter_quirk {
1364 	u16 chip_vendor;
1365 	u16 chip_device;
1366 	u16 subsys_vendor;
1367 	u16 subsys_device;
1368 	u8 revision;
1369 };
1370 
1371 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1372 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1373 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1374 	{ 0, 0, 0, 0, 0 },
1375 };
1376 
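/* Check whether the device matches an entry in the stutter quirk list. */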
1377 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1378 {
1379 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1380 
1381 	while (p && p->chip_device != 0) {
1382 		if (pdev->vendor == p->chip_vendor &&
1383 		    pdev->device == p->chip_device &&
1384 		    pdev->subsystem_vendor == p->subsys_vendor &&
1385 		    pdev->subsystem_device == p->subsys_device &&
1386 		    pdev->revision == p->revision) {
1387 			return true;
1388 		}
1389 		++p;
1390 	}
1391 	return false;
1392 }
1393 
1394 static int amdgpu_dm_init(struct amdgpu_device *adev)
1395 {
1396 	struct dc_init_data init_data;
1397 #ifdef CONFIG_DRM_AMD_DC_HDCP
1398 	struct dc_callback_init init_params;
1399 #endif
1400 	int r;
1401 
1402 	adev->dm.ddev = adev_to_drm(adev);
1403 	adev->dm.adev = adev;
1404 
1405 	/* Zero all the fields */
1406 	memset(&init_data, 0, sizeof(init_data));
1407 #ifdef CONFIG_DRM_AMD_DC_HDCP
1408 	memset(&init_params, 0, sizeof(init_params));
1409 #endif
1410 
1411 	mutex_init(&adev->dm.dc_lock);
1412 	mutex_init(&adev->dm.audio_lock);
1413 	spin_lock_init(&adev->dm.vblank_lock);
1414 
	if (amdgpu_dm_irq_init(adev)) {
1416 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1417 		goto error;
1418 	}
1419 
1420 	init_data.asic_id.chip_family = adev->family;
1421 
1422 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1423 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1424 	init_data.asic_id.chip_id = adev->pdev->device;
1425 
1426 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1427 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1428 	init_data.asic_id.atombios_base_address =
1429 		adev->mode_info.atom_context->bios;
1430 
1431 	init_data.driver = adev;
1432 
1433 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1434 
1435 	if (!adev->dm.cgs_device) {
1436 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1437 		goto error;
1438 	}
1439 
1440 	init_data.cgs_device = adev->dm.cgs_device;
1441 
1442 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1443 
1444 	switch (adev->ip_versions[DCE_HWIP][0]) {
1445 	case IP_VERSION(2, 1, 0):
1446 		switch (adev->dm.dmcub_fw_version) {
1447 		case 0: /* development */
1448 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1449 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1450 			init_data.flags.disable_dmcu = false;
1451 			break;
1452 		default:
1453 			init_data.flags.disable_dmcu = true;
1454 		}
1455 		break;
1456 	case IP_VERSION(2, 0, 3):
1457 		init_data.flags.disable_dmcu = true;
1458 		break;
1459 	default:
1460 		break;
1461 	}
1462 
1463 	switch (adev->asic_type) {
1464 	case CHIP_CARRIZO:
1465 	case CHIP_STONEY:
1466 		init_data.flags.gpu_vm_support = true;
1467 		break;
1468 	default:
1469 		switch (adev->ip_versions[DCE_HWIP][0]) {
1470 		case IP_VERSION(1, 0, 0):
1471 		case IP_VERSION(1, 0, 1):
1472 			/* enable S/G on PCO and RV2 */
1473 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1474 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1475 				init_data.flags.gpu_vm_support = true;
1476 			break;
1477 		case IP_VERSION(2, 1, 0):
1478 		case IP_VERSION(3, 0, 1):
1479 		case IP_VERSION(3, 1, 2):
1480 		case IP_VERSION(3, 1, 3):
1481 		case IP_VERSION(3, 1, 5):
1482 		case IP_VERSION(3, 1, 6):
1483 			init_data.flags.gpu_vm_support = true;
1484 			break;
1485 		default:
1486 			break;
1487 		}
1488 		break;
1489 	}
1490 
1491 	if (init_data.flags.gpu_vm_support)
1492 		adev->mode_info.gpu_vm_support = true;
1493 
1494 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1495 		init_data.flags.fbc_support = true;
1496 
1497 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1498 		init_data.flags.multi_mon_pp_mclk_switch = true;
1499 
1500 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1501 		init_data.flags.disable_fractional_pwm = true;
1502 
1503 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1504 		init_data.flags.edp_no_power_sequencing = true;
1505 
1506 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1507 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1508 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1509 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1510 
1511 	init_data.flags.seamless_boot_edp_requested = false;
1512 
1513 	if (check_seamless_boot_capability(adev)) {
1514 		init_data.flags.seamless_boot_edp_requested = true;
1515 		init_data.flags.allow_seamless_boot_optimization = true;
1516 		DRM_INFO("Seamless boot condition check passed\n");
1517 	}
1518 
1519 	init_data.flags.enable_mipi_converter_optimization = true;
1520 
1521 	INIT_LIST_HEAD(&adev->dm.da_list);
1522 	/* Display Core create. */
1523 	adev->dm.dc = dc_create(&init_data);
1524 
1525 	if (adev->dm.dc) {
1526 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1527 	} else {
1528 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1529 		goto error;
1530 	}
1531 
1532 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1533 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1534 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1535 	}
1536 
1537 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1538 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1539 	if (dm_should_disable_stutter(adev->pdev))
1540 		adev->dm.dc->debug.disable_stutter = true;
1541 
1542 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1543 		adev->dm.dc->debug.disable_stutter = true;
1544 
1545 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1546 		adev->dm.dc->debug.disable_dsc = true;
1547 		adev->dm.dc->debug.disable_dsc_edp = true;
1548 	}
1549 
1550 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1551 		adev->dm.dc->debug.disable_clock_gate = true;
1552 
1553 	r = dm_dmub_hw_init(adev);
1554 	if (r) {
1555 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1556 		goto error;
1557 	}
1558 
1559 	dc_hardware_init(adev->dm.dc);
1560 
1561 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1562 	if (!adev->dm.hpd_rx_offload_wq) {
1563 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1564 		goto error;
1565 	}
1566 
1567 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1568 		struct dc_phy_addr_space_config pa_config;
1569 
1570 		mmhub_read_system_context(adev, &pa_config);
1571 
1572 		// Call the DC init_memory func
1573 		dc_setup_system_context(adev->dm.dc, &pa_config);
1574 	}
1575 
1576 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1577 	if (!adev->dm.freesync_module) {
1578 		DRM_ERROR(
1579 		"amdgpu: failed to initialize freesync_module.\n");
1580 	} else
1581 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1582 				adev->dm.freesync_module);
1583 
1584 	amdgpu_dm_init_color_mod();
1585 
1586 	if (adev->dm.dc->caps.max_links > 0) {
1587 		adev->dm.vblank_control_workqueue =
1588 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1589 		if (!adev->dm.vblank_control_workqueue)
1590 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1591 	}
1592 
1593 #ifdef CONFIG_DRM_AMD_DC_HDCP
1594 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1595 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1596 
1597 		if (!adev->dm.hdcp_workqueue)
1598 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1599 		else
1600 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1601 
1602 		dc_init_callbacks(adev->dm.dc, &init_params);
1603 	}
1604 #endif
1605 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1606 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1607 #endif
1608 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1609 		init_completion(&adev->dm.dmub_aux_transfer_done);
1610 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1611 		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1613 			goto error;
1614 		}
1615 
1616 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1617 		if (!adev->dm.delayed_hpd_wq) {
1618 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1619 			goto error;
1620 		}
1621 
1622 		amdgpu_dm_outbox_init(adev);
1623 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1624 			dmub_aux_setconfig_callback, false)) {
1625 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1626 			goto error;
1627 		}
1628 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1629 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1630 			goto error;
1631 		}
1632 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd_irq callback\n");
1634 			goto error;
1635 		}
1636 	}
1637 
1638 	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1641 		goto error;
1642 	}
1643 
1644 	/* create fake encoders for MST */
1645 	dm_dp_create_fake_mst_encoders(adev);
1646 
1647 	/* TODO: Add_display_info? */
1648 
1649 	/* TODO use dynamic cursor width */
1650 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1651 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1652 
1653 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
1656 		goto error;
1657 	}
1658 
1659 
1660 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1661 
1662 	return 0;
1663 error:
1664 	amdgpu_dm_fini(adev);
1665 
1666 	return -EINVAL;
1667 }
1668 
1669 static int amdgpu_dm_early_fini(void *handle)
1670 {
1671 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1672 
1673 	amdgpu_dm_audio_fini(adev);
1674 
1675 	return 0;
1676 }
1677 
1678 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1679 {
1680 	int i;
1681 
1682 	if (adev->dm.vblank_control_workqueue) {
1683 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1684 		adev->dm.vblank_control_workqueue = NULL;
1685 	}
1686 
	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1690 
1691 	amdgpu_dm_destroy_drm_device(&adev->dm);
1692 
1693 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1694 	if (adev->dm.crc_rd_wrk) {
1695 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1696 		kfree(adev->dm.crc_rd_wrk);
1697 		adev->dm.crc_rd_wrk = NULL;
1698 	}
1699 #endif
1700 #ifdef CONFIG_DRM_AMD_DC_HDCP
1701 	if (adev->dm.hdcp_workqueue) {
1702 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1703 		adev->dm.hdcp_workqueue = NULL;
1704 	}
1705 
1706 	if (adev->dm.dc)
1707 		dc_deinit_callbacks(adev->dm.dc);
1708 #endif
1709 
1710 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1711 
1712 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1713 		kfree(adev->dm.dmub_notify);
1714 		adev->dm.dmub_notify = NULL;
1715 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1716 		adev->dm.delayed_hpd_wq = NULL;
1717 	}
1718 
1719 	if (adev->dm.dmub_bo)
1720 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1721 				      &adev->dm.dmub_bo_gpu_addr,
1722 				      &adev->dm.dmub_bo_cpu_addr);
1723 
1724 	if (adev->dm.hpd_rx_offload_wq) {
1725 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1726 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1727 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1728 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1729 			}
1730 		}
1731 
1732 		kfree(adev->dm.hpd_rx_offload_wq);
1733 		adev->dm.hpd_rx_offload_wq = NULL;
1734 	}
1735 
1736 	/* DC Destroy TODO: Replace destroy DAL */
1737 	if (adev->dm.dc)
1738 		dc_destroy(&adev->dm.dc);
1739 	/*
1740 	 * TODO: pageflip, vlank interrupt
1741 	 *
1742 	 * amdgpu_dm_irq_fini(adev);
1743 	 */
1744 
1745 	if (adev->dm.cgs_device) {
1746 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1747 		adev->dm.cgs_device = NULL;
1748 	}
1749 	if (adev->dm.freesync_module) {
1750 		mod_freesync_destroy(adev->dm.freesync_module);
1751 		adev->dm.freesync_module = NULL;
1752 	}
1753 
1754 	mutex_destroy(&adev->dm.audio_lock);
1755 	mutex_destroy(&adev->dm.dc_lock);
1756 
1758 }
1759 
1760 static int load_dmcu_fw(struct amdgpu_device *adev)
1761 {
1762 	const char *fw_name_dmcu = NULL;
1763 	int r;
1764 	const struct dmcu_firmware_header_v1_0 *hdr;
1765 
	switch (adev->asic_type) {
1767 #if defined(CONFIG_DRM_AMD_DC_SI)
1768 	case CHIP_TAHITI:
1769 	case CHIP_PITCAIRN:
1770 	case CHIP_VERDE:
1771 	case CHIP_OLAND:
1772 #endif
1773 	case CHIP_BONAIRE:
1774 	case CHIP_HAWAII:
1775 	case CHIP_KAVERI:
1776 	case CHIP_KABINI:
1777 	case CHIP_MULLINS:
1778 	case CHIP_TONGA:
1779 	case CHIP_FIJI:
1780 	case CHIP_CARRIZO:
1781 	case CHIP_STONEY:
1782 	case CHIP_POLARIS11:
1783 	case CHIP_POLARIS10:
1784 	case CHIP_POLARIS12:
1785 	case CHIP_VEGAM:
1786 	case CHIP_VEGA10:
1787 	case CHIP_VEGA12:
1788 	case CHIP_VEGA20:
1789 		return 0;
1790 	case CHIP_NAVI12:
1791 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1792 		break;
1793 	case CHIP_RAVEN:
1794 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_PICASSO_DMCU;
1796 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1797 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1798 		else
1799 			return 0;
1800 		break;
1801 	default:
1802 		switch (adev->ip_versions[DCE_HWIP][0]) {
1803 		case IP_VERSION(2, 0, 2):
1804 		case IP_VERSION(2, 0, 3):
1805 		case IP_VERSION(2, 0, 0):
1806 		case IP_VERSION(2, 1, 0):
1807 		case IP_VERSION(3, 0, 0):
1808 		case IP_VERSION(3, 0, 2):
1809 		case IP_VERSION(3, 0, 3):
1810 		case IP_VERSION(3, 0, 1):
1811 		case IP_VERSION(3, 1, 2):
1812 		case IP_VERSION(3, 1, 3):
1813 		case IP_VERSION(3, 1, 5):
1814 		case IP_VERSION(3, 1, 6):
1815 		case IP_VERSION(3, 2, 0):
1816 		case IP_VERSION(3, 2, 1):
1817 			return 0;
1818 		default:
1819 			break;
1820 		}
1821 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1822 		return -EINVAL;
1823 	}
1824 
1825 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1826 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1827 		return 0;
1828 	}
1829 
1830 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1831 	if (r == -ENOENT) {
1832 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1833 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1834 		adev->dm.fw_dmcu = NULL;
1835 		return 0;
1836 	}
1837 	if (r) {
1838 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1839 			fw_name_dmcu);
1840 		return r;
1841 	}
1842 
1843 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1844 	if (r) {
1845 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1846 			fw_name_dmcu);
1847 		release_firmware(adev->dm.fw_dmcu);
1848 		adev->dm.fw_dmcu = NULL;
1849 		return r;
1850 	}
1851 
1852 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
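	/*
	 * The DMCU image contains both an ERAM payload and an interrupt
	 * vector (INTV) section; register them as separate ucode entries so
	 * PSP can load each piece, and account for both in the total fw size.
	 */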
1853 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1854 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1855 	adev->firmware.fw_size +=
1856 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1857 
1858 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1859 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1860 	adev->firmware.fw_size +=
1861 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1862 
1863 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1864 
1865 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1866 
1867 	return 0;
1868 }
1869 
1870 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1871 {
1872 	struct amdgpu_device *adev = ctx;
1873 
1874 	return dm_read_reg(adev->dm.dc->ctx, address);
1875 }
1876 
1877 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1878 				     uint32_t value)
1879 {
1880 	struct amdgpu_device *adev = ctx;
1881 
1882 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1883 }
1884 
1885 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1886 {
1887 	struct dmub_srv_create_params create_params;
1888 	struct dmub_srv_region_params region_params;
1889 	struct dmub_srv_region_info region_info;
1890 	struct dmub_srv_fb_params fb_params;
1891 	struct dmub_srv_fb_info *fb_info;
1892 	struct dmub_srv *dmub_srv;
1893 	const struct dmcub_firmware_header_v1_0 *hdr;
1894 	const char *fw_name_dmub;
1895 	enum dmub_asic dmub_asic;
1896 	enum dmub_status status;
1897 	int r;
1898 
1899 	switch (adev->ip_versions[DCE_HWIP][0]) {
1900 	case IP_VERSION(2, 1, 0):
1901 		dmub_asic = DMUB_ASIC_DCN21;
1902 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1903 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1904 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1905 		break;
1906 	case IP_VERSION(3, 0, 0):
1907 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1908 			dmub_asic = DMUB_ASIC_DCN30;
1909 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1910 		} else {
1911 			dmub_asic = DMUB_ASIC_DCN30;
1912 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1913 		}
1914 		break;
1915 	case IP_VERSION(3, 0, 1):
1916 		dmub_asic = DMUB_ASIC_DCN301;
1917 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1918 		break;
1919 	case IP_VERSION(3, 0, 2):
1920 		dmub_asic = DMUB_ASIC_DCN302;
1921 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1922 		break;
1923 	case IP_VERSION(3, 0, 3):
1924 		dmub_asic = DMUB_ASIC_DCN303;
1925 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1926 		break;
1927 	case IP_VERSION(3, 1, 2):
1928 	case IP_VERSION(3, 1, 3):
1929 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1930 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1931 		break;
1932 	case IP_VERSION(3, 1, 5):
1933 		dmub_asic = DMUB_ASIC_DCN315;
1934 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1935 		break;
1936 	case IP_VERSION(3, 1, 6):
1937 		dmub_asic = DMUB_ASIC_DCN316;
1938 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1939 		break;
1940 	case IP_VERSION(3, 2, 0):
1941 		dmub_asic = DMUB_ASIC_DCN32;
1942 		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
1943 		break;
1944 	case IP_VERSION(3, 2, 1):
1945 		dmub_asic = DMUB_ASIC_DCN321;
1946 		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
1947 		break;
1948 	default:
1949 		/* ASIC doesn't support DMUB. */
1950 		return 0;
1951 	}
1952 
1953 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1954 	if (r) {
1955 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1956 		return 0;
1957 	}
1958 
1959 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1960 	if (r) {
1961 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1962 		return 0;
1963 	}
1964 
1965 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1966 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1967 
1968 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1969 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1970 			AMDGPU_UCODE_ID_DMCUB;
1971 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1972 			adev->dm.dmub_fw;
1973 		adev->firmware.fw_size +=
1974 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1975 
1976 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1977 			 adev->dm.dmcub_fw_version);
1978 	}
1979 
1980 
1981 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1982 	dmub_srv = adev->dm.dmub_srv;
1983 
1984 	if (!dmub_srv) {
1985 		DRM_ERROR("Failed to allocate DMUB service!\n");
1986 		return -ENOMEM;
1987 	}
1988 
1989 	memset(&create_params, 0, sizeof(create_params));
1990 	create_params.user_ctx = adev;
1991 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1992 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1993 	create_params.asic = dmub_asic;
1994 
1995 	/* Create the DMUB service. */
1996 	status = dmub_srv_create(dmub_srv, &create_params);
1997 	if (status != DMUB_STATUS_OK) {
1998 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1999 		return -EINVAL;
2000 	}
2001 
2002 	/* Calculate the size of all the regions for the DMUB service. */
2003 	memset(&region_params, 0, sizeof(region_params));
2004 
2005 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2006 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2007 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2008 	region_params.vbios_size = adev->bios_size;
2009 	region_params.fw_bss_data = region_params.bss_data_size ?
2010 		adev->dm.dmub_fw->data +
2011 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2012 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2013 	region_params.fw_inst_const =
2014 		adev->dm.dmub_fw->data +
2015 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2016 		PSP_HEADER_BYTES;
2017 
2018 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2019 					   &region_info);
2020 
2021 	if (status != DMUB_STATUS_OK) {
2022 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2023 		return -EINVAL;
2024 	}
2025 
2026 	/*
2027 	 * Allocate a framebuffer based on the total size of all the regions.
2028 	 * TODO: Move this into GART.
2029 	 */
2030 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2031 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2032 				    &adev->dm.dmub_bo_gpu_addr,
2033 				    &adev->dm.dmub_bo_cpu_addr);
2034 	if (r)
2035 		return r;
2036 
2037 	/* Rebase the regions on the framebuffer address. */
2038 	memset(&fb_params, 0, sizeof(fb_params));
2039 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2040 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2041 	fb_params.region_info = &region_info;
2042 
2043 	adev->dm.dmub_fb_info =
2044 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2045 	fb_info = adev->dm.dmub_fb_info;
2046 
2047 	if (!fb_info) {
		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
2050 		return -ENOMEM;
2051 	}
2052 
2053 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2054 	if (status != DMUB_STATUS_OK) {
2055 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2056 		return -EINVAL;
2057 	}
2058 
2059 	return 0;
2060 }
2061 
2062 static int dm_sw_init(void *handle)
2063 {
2064 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2065 	int r;
2066 
2067 	r = dm_dmub_sw_init(adev);
2068 	if (r)
2069 		return r;
2070 
2071 	return load_dmcu_fw(adev);
2072 }
2073 
2074 static int dm_sw_fini(void *handle)
2075 {
2076 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2077 
2078 	kfree(adev->dm.dmub_fb_info);
2079 	adev->dm.dmub_fb_info = NULL;
2080 
2081 	if (adev->dm.dmub_srv) {
2082 		dmub_srv_destroy(adev->dm.dmub_srv);
2083 		adev->dm.dmub_srv = NULL;
2084 	}
2085 
2086 	release_firmware(adev->dm.dmub_fw);
2087 	adev->dm.dmub_fw = NULL;
2088 
2089 	release_firmware(adev->dm.fw_dmcu);
2090 	adev->dm.fw_dmcu = NULL;
2091 
2092 	return 0;
2093 }
2094 
2095 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2096 {
2097 	struct amdgpu_dm_connector *aconnector;
2098 	struct drm_connector *connector;
2099 	struct drm_connector_list_iter iter;
2100 	int ret = 0;
2101 
2102 	drm_connector_list_iter_begin(dev, &iter);
2103 	drm_for_each_connector_iter(connector, &iter) {
2104 		aconnector = to_amdgpu_dm_connector(connector);
2105 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2106 		    aconnector->mst_mgr.aux) {
2107 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2108 					 aconnector,
2109 					 aconnector->base.base.id);
2110 
2111 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2112 			if (ret < 0) {
2113 				DRM_ERROR("DM_MST: Failed to start MST\n");
2114 				aconnector->dc_link->type =
2115 					dc_connection_single;
2116 				break;
2117 			}
2118 		}
2119 	}
2120 	drm_connector_list_iter_end(&iter);
2121 
2122 	return ret;
2123 }
2124 
2125 static int dm_late_init(void *handle)
2126 {
2127 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2128 
2129 	struct dmcu_iram_parameters params;
2130 	unsigned int linear_lut[16];
2131 	int i;
2132 	struct dmcu *dmcu = NULL;
2133 
2134 	dmcu = adev->dm.dc->res_pool->dmcu;
2135 
2136 	for (i = 0; i < 16; i++)
2137 		linear_lut[i] = 0xFFFF * i / 15;
2138 
2139 	params.set = 0;
2140 	params.backlight_ramping_override = false;
2141 	params.backlight_ramping_start = 0xCCCC;
2142 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2143 	params.backlight_lut_array_size = 16;
2144 	params.backlight_lut_array = linear_lut;
2145 
	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
2149 	params.min_abm_backlight = 0x28F;
	/*
	 * In the case where ABM is implemented on dmcub,
	 * the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
2154 	if (dmcu) {
2155 		if (!dmcu_load_iram(dmcu, params))
2156 			return -EINVAL;
2157 	} else if (adev->dm.dc->ctx->dmub_srv) {
2158 		struct dc_link *edp_links[MAX_NUM_EDP];
2159 		int edp_num;
2160 
2161 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2162 		for (i = 0; i < edp_num; i++) {
2163 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2164 				return -EINVAL;
2165 		}
2166 	}
2167 
2168 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2169 }
2170 
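/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a topology fails to resume, it is torn down and a hotplug event is
 * generated so userspace can re-probe the connector.
 */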
2171 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2172 {
2173 	struct amdgpu_dm_connector *aconnector;
2174 	struct drm_connector *connector;
2175 	struct drm_connector_list_iter iter;
2176 	struct drm_dp_mst_topology_mgr *mgr;
2177 	int ret;
2178 	bool need_hotplug = false;
2179 
2180 	drm_connector_list_iter_begin(dev, &iter);
2181 	drm_for_each_connector_iter(connector, &iter) {
2182 		aconnector = to_amdgpu_dm_connector(connector);
2183 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2184 		    aconnector->mst_port)
2185 			continue;
2186 
2187 		mgr = &aconnector->mst_mgr;
2188 
2189 		if (suspend) {
2190 			drm_dp_mst_topology_mgr_suspend(mgr);
2191 		} else {
2192 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2193 			if (ret < 0) {
2194 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2195 				need_hotplug = true;
2196 			}
2197 		}
2198 	}
2199 	drm_connector_list_iter_end(&iter);
2200 
2201 	if (need_hotplug)
2202 		drm_kms_helper_hotplug_event(dev);
2203 }
2204 
2205 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2206 {
2207 	int ret = 0;
2208 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed and
	 * should be passed to smu during boot up and on resume from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create / dcn20_resource_construct, then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
2239 	switch (adev->ip_versions[DCE_HWIP][0]) {
2240 	case IP_VERSION(2, 0, 2):
2241 	case IP_VERSION(2, 0, 0):
2242 		break;
2243 	default:
2244 		return 0;
2245 	}
2246 
2247 	ret = amdgpu_dpm_write_watermarks_table(adev);
2248 	if (ret) {
2249 		DRM_ERROR("Failed to update WMTABLE!\n");
2250 		return ret;
2251 	}
2252 
2253 	return 0;
2254 }
2255 
2256 /**
2257  * dm_hw_init() - Initialize DC device
2258  * @handle: The base driver device containing the amdgpu_dm device.
2259  *
2260  * Initialize the &struct amdgpu_display_manager device. This involves calling
2261  * the initializers of each DM component, then populating the struct with them.
2262  *
2263  * Although the function implies hardware initialization, both hardware and
2264  * software are initialized here. Splitting them out to their relevant init
2265  * hooks is a future TODO item.
2266  *
2267  * Some notable things that are initialized here:
2268  *
2269  * - Display Core, both software and hardware
2270  * - DC modules that we need (freesync and color management)
2271  * - DRM software states
2272  * - Interrupt sources and handlers
2273  * - Vblank support
2274  * - Debug FS entries, if enabled
2275  */
2276 static int dm_hw_init(void *handle)
2277 {
2278 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2279 	/* Create DAL display manager */
2280 	amdgpu_dm_init(adev);
2281 	amdgpu_dm_hpd_init(adev);
2282 
2283 	return 0;
2284 }
2285 
2286 /**
2287  * dm_hw_fini() - Teardown DC device
2288  * @handle: The base driver device containing the amdgpu_dm device.
2289  *
2290  * Teardown components within &struct amdgpu_display_manager that require
2291  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2292  * were loaded. Also flush IRQ workqueues and disable them.
2293  */
2294 static int dm_hw_fini(void *handle)
2295 {
2296 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2297 
2298 	amdgpu_dm_hpd_fini(adev);
2299 
2300 	amdgpu_dm_irq_fini(adev);
2301 	amdgpu_dm_fini(adev);
2302 	return 0;
2303 }
2304 
2305 
2306 static int dm_enable_vblank(struct drm_crtc *crtc);
2307 static void dm_disable_vblank(struct drm_crtc *crtc);
2308 
2309 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2310 				 struct dc_state *state, bool enable)
2311 {
2312 	enum dc_irq_source irq_source;
2313 	struct amdgpu_crtc *acrtc;
2314 	int rc = -EBUSY;
2315 	int i = 0;
2316 
2317 	for (i = 0; i < state->stream_count; i++) {
2318 		acrtc = get_crtc_by_otg_inst(
2319 				adev, state->stream_status[i].primary_otg_inst);
2320 
2321 		if (acrtc && state->stream_status[i].plane_count != 0) {
2322 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2323 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2326 			if (rc)
2327 				DRM_WARN("Failed to %s pflip interrupts\n",
2328 					 enable ? "enable" : "disable");
2329 
2330 			if (enable) {
2331 				rc = dm_enable_vblank(&acrtc->base);
2332 				if (rc)
2333 					DRM_WARN("Failed to enable vblank interrupts\n");
2334 			} else {
2335 				dm_disable_vblank(&acrtc->base);
2336 			}
2337 
2338 		}
2339 	}
2340 
2341 }
2342 
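/*
 * Build a copy of the current DC state, strip all planes and streams from
 * it, and commit the resulting zero-stream state. Used on the suspend /
 * GPU-reset path before hardware is taken down.
 */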
2343 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2344 {
2345 	struct dc_state *context = NULL;
2346 	enum dc_status res = DC_ERROR_UNEXPECTED;
2347 	int i;
2348 	struct dc_stream_state *del_streams[MAX_PIPES];
2349 	int del_streams_count = 0;
2350 
2351 	memset(del_streams, 0, sizeof(del_streams));
2352 
2353 	context = dc_create_state(dc);
2354 	if (context == NULL)
2355 		goto context_alloc_fail;
2356 
2357 	dc_resource_state_copy_construct_current(dc, context);
2358 
2359 	/* First remove from context all streams */
2360 	for (i = 0; i < context->stream_count; i++) {
2361 		struct dc_stream_state *stream = context->streams[i];
2362 
2363 		del_streams[del_streams_count++] = stream;
2364 	}
2365 
2366 	/* Remove all planes for removed streams and then remove the streams */
2367 	for (i = 0; i < del_streams_count; i++) {
2368 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2369 			res = DC_FAIL_DETACH_SURFACES;
2370 			goto fail;
2371 		}
2372 
2373 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2374 		if (res != DC_OK)
2375 			goto fail;
2376 	}
2377 
2378 	res = dc_commit_state(dc, context);
2379 
2380 fail:
2381 	dc_release_state(context);
2382 
2383 context_alloc_fail:
2384 	return res;
2385 }
2386 
2387 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2388 {
2389 	int i;
2390 
2391 	if (dm->hpd_rx_offload_wq) {
2392 		for (i = 0; i < dm->dc->caps.max_links; i++)
2393 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2394 	}
2395 }
2396 
2397 static int dm_suspend(void *handle)
2398 {
2399 	struct amdgpu_device *adev = handle;
2400 	struct amdgpu_display_manager *dm = &adev->dm;
2401 	int ret = 0;
2402 
2403 	if (amdgpu_in_reset(adev)) {
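		/*
		 * Note: dm->dc_lock is taken here and intentionally left held
		 * across the GPU reset; it is released in dm_resume() on the
		 * amdgpu_in_reset() path.
		 */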
2404 		mutex_lock(&dm->dc_lock);
2405 
2406 		dc_allow_idle_optimizations(adev->dm.dc, false);
2407 
2408 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2409 
2410 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2411 
2412 		amdgpu_dm_commit_zero_streams(dm->dc);
2413 
2414 		amdgpu_dm_irq_suspend(adev);
2415 
2416 		hpd_rx_irq_work_suspend(dm);
2417 
2418 		return ret;
2419 	}
2420 
2421 	WARN_ON(adev->dm.cached_state);
2422 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2423 
2424 	s3_handle_mst(adev_to_drm(adev), true);
2425 
2426 	amdgpu_dm_irq_suspend(adev);
2427 
2428 	hpd_rx_irq_work_suspend(dm);
2429 
2430 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2431 
2432 	return 0;
2433 }
2434 
2435 struct amdgpu_dm_connector *
2436 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2437 					     struct drm_crtc *crtc)
2438 {
2439 	uint32_t i;
2440 	struct drm_connector_state *new_con_state;
2441 	struct drm_connector *connector;
2442 	struct drm_crtc *crtc_from_state;
2443 
2444 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2445 		crtc_from_state = new_con_state->crtc;
2446 
2447 		if (crtc_from_state == crtc)
2448 			return to_amdgpu_dm_connector(connector);
2449 	}
2450 
2451 	return NULL;
2452 }
2453 
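/*
 * Fake a link detection for a connector that is forced on but has no
 * physically detected sink: create a sink of the matching signal type and
 * attempt to read the local EDID so a stream can still be built.
 */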
2454 static void emulated_link_detect(struct dc_link *link)
2455 {
2456 	struct dc_sink_init_data sink_init_data = { 0 };
2457 	struct display_sink_capability sink_caps = { 0 };
2458 	enum dc_edid_status edid_status;
2459 	struct dc_context *dc_ctx = link->ctx;
2460 	struct dc_sink *sink = NULL;
2461 	struct dc_sink *prev_sink = NULL;
2462 
2463 	link->type = dc_connection_none;
2464 	prev_sink = link->local_sink;
2465 
2466 	if (prev_sink)
2467 		dc_sink_release(prev_sink);
2468 
2469 	switch (link->connector_signal) {
2470 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2471 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2472 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2473 		break;
2474 	}
2475 
2476 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2477 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2478 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2479 		break;
2480 	}
2481 
2482 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2483 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2484 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2485 		break;
2486 	}
2487 
2488 	case SIGNAL_TYPE_LVDS: {
2489 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2490 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2491 		break;
2492 	}
2493 
2494 	case SIGNAL_TYPE_EDP: {
2495 		sink_caps.transaction_type =
2496 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2497 		sink_caps.signal = SIGNAL_TYPE_EDP;
2498 		break;
2499 	}
2500 
2501 	case SIGNAL_TYPE_DISPLAY_PORT: {
2502 		sink_caps.transaction_type =
2503 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2504 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2505 		break;
2506 	}
2507 
2508 	default:
2509 		DC_ERROR("Invalid connector type! signal:%d\n",
2510 			link->connector_signal);
2511 		return;
2512 	}
2513 
2514 	sink_init_data.link = link;
2515 	sink_init_data.sink_signal = sink_caps.signal;
2516 
2517 	sink = dc_sink_create(&sink_init_data);
2518 	if (!sink) {
2519 		DC_ERROR("Failed to create sink!\n");
2520 		return;
2521 	}
2522 
2523 	/* dc_sink_create returns a new reference */
2524 	link->local_sink = sink;
2525 
2526 	edid_status = dm_helpers_read_local_edid(
2527 			link->ctx,
2528 			link,
2529 			sink);
2530 
2531 	if (edid_status != EDID_OK)
2532 		DC_ERROR("Failed to read EDID");
2533 
2534 }
2535 
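/*
 * Re-commit the streams of a cached DC state after GPU reset: every surface
 * is flagged for a full update so the hardware state gets reprogrammed
 * completely.
 */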
2536 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2537 				     struct amdgpu_display_manager *dm)
2538 {
2539 	struct {
2540 		struct dc_surface_update surface_updates[MAX_SURFACES];
2541 		struct dc_plane_info plane_infos[MAX_SURFACES];
2542 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2543 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2544 		struct dc_stream_update stream_update;
	} *bundle;
2546 	int k, m;
2547 
2548 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2549 
2550 	if (!bundle) {
2551 		dm_error("Failed to allocate update bundle\n");
2552 		goto cleanup;
2553 	}
2554 
2555 	for (k = 0; k < dc_state->stream_count; k++) {
2556 		bundle->stream_update.stream = dc_state->streams[k];
2557 
2558 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2559 			bundle->surface_updates[m].surface =
2560 				dc_state->stream_status->plane_states[m];
2561 			bundle->surface_updates[m].surface->force_full_update =
2562 				true;
2563 		}
2564 		dc_commit_updates_for_stream(
2565 			dm->dc, bundle->surface_updates,
2566 			dc_state->stream_status->plane_count,
2567 			dc_state->streams[k], &bundle->stream_update, dc_state);
2568 	}
2569 
2570 cleanup:
	kfree(bundle);
2574 }
2575 
2576 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2577 {
2578 	struct dc_stream_state *stream_state;
2579 	struct amdgpu_dm_connector *aconnector = link->priv;
2580 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2581 	struct dc_stream_update stream_update;
2582 	bool dpms_off = true;
2583 
2584 	memset(&stream_update, 0, sizeof(stream_update));
2585 	stream_update.dpms_off = &dpms_off;
2586 
2587 	mutex_lock(&adev->dm.dc_lock);
2588 	stream_state = dc_stream_find_from_link(link);
2589 
2590 	if (stream_state == NULL) {
2591 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2592 		mutex_unlock(&adev->dm.dc_lock);
2593 		return;
2594 	}
2595 
2596 	stream_update.stream = stream_state;
2597 	acrtc_state->force_dpms_off = true;
2598 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2599 				     stream_state, &stream_update,
2600 				     stream_state->ctx->dc->current_state);
2601 	mutex_unlock(&adev->dm.dc_lock);
2602 }
2603 
2604 static int dm_resume(void *handle)
2605 {
2606 	struct amdgpu_device *adev = handle;
2607 	struct drm_device *ddev = adev_to_drm(adev);
2608 	struct amdgpu_display_manager *dm = &adev->dm;
2609 	struct amdgpu_dm_connector *aconnector;
2610 	struct drm_connector *connector;
2611 	struct drm_connector_list_iter iter;
2612 	struct drm_crtc *crtc;
2613 	struct drm_crtc_state *new_crtc_state;
2614 	struct dm_crtc_state *dm_new_crtc_state;
2615 	struct drm_plane *plane;
2616 	struct drm_plane_state *new_plane_state;
2617 	struct dm_plane_state *dm_new_plane_state;
2618 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2619 	enum dc_connection_type new_connection_type = dc_connection_none;
2620 	struct dc_state *dc_state;
2621 	int i, r, j;
2622 
2623 	if (amdgpu_in_reset(adev)) {
2624 		dc_state = dm->cached_dc_state;
2625 
2626 		/*
2627 		 * The dc->current_state is backed up into dm->cached_dc_state
2628 		 * before we commit 0 streams.
2629 		 *
2630 		 * DC will clear link encoder assignments on the real state
2631 		 * but the changes won't propagate over to the copy we made
2632 		 * before the 0 streams commit.
2633 		 *
2634 		 * DC expects that link encoder assignments are *not* valid
2635 		 * when committing a state, so as a workaround we can copy
2636 		 * off of the current state.
2637 		 *
2638 		 * We lose the previous assignments, but we had already
2639 		 * commit 0 streams anyway.
2640 		 */
2641 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2642 
2643 		if (dc_enable_dmub_notifications(adev->dm.dc))
2644 			amdgpu_dm_outbox_init(adev);
2645 
2646 		r = dm_dmub_hw_init(adev);
2647 		if (r)
2648 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2649 
2650 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2651 		dc_resume(dm->dc);
2652 
2653 		amdgpu_dm_irq_resume_early(adev);
2654 
2655 		for (i = 0; i < dc_state->stream_count; i++) {
2656 			dc_state->streams[i]->mode_changed = true;
2657 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2658 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2659 					= 0xffffffff;
2660 			}
2661 		}
2662 
2663 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2664 
2665 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2666 
2667 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2668 
2669 		dc_release_state(dm->cached_dc_state);
2670 		dm->cached_dc_state = NULL;
2671 
2672 		amdgpu_dm_irq_resume_late(adev);
2673 
2674 		mutex_unlock(&dm->dc_lock);
2675 
2676 		return 0;
2677 	}
2678 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2679 	dc_release_state(dm_state->context);
2680 	dm_state->context = dc_create_state(dm->dc);
2681 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2682 	dc_resource_state_construct(dm->dc, dm_state->context);
2683 
2684 	/* Re-enable outbox interrupts for DPIA. */
2685 	if (dc_enable_dmub_notifications(adev->dm.dc))
2686 		amdgpu_dm_outbox_init(adev);
2687 
2688 	/* Before powering on DC we need to re-initialize DMUB. */
2689 	dm_dmub_hw_resume(adev);
2690 
2691 	/* power on hardware */
2692 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2693 
2694 	/* program HPD filter */
2695 	dc_resume(dm->dc);
2696 
2697 	/*
2698 	 * early enable HPD Rx IRQ, should be done before set mode as short
2699 	 * pulse interrupts are used for MST
2700 	 */
2701 	amdgpu_dm_irq_resume_early(adev);
2702 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2704 	s3_handle_mst(ddev, false);
2705 
	/* Do detection */
2707 	drm_connector_list_iter_begin(ddev, &iter);
2708 	drm_for_each_connector_iter(connector, &iter) {
2709 		aconnector = to_amdgpu_dm_connector(connector);
2710 
2711 		/*
2712 		 * this is the case when traversing through already created
2713 		 * MST connectors, should be skipped
2714 		 */
2715 		if (aconnector->dc_link &&
2716 		    aconnector->dc_link->type == dc_connection_mst_branch)
2717 			continue;
2718 
2719 		mutex_lock(&aconnector->hpd_lock);
2720 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2721 			DRM_ERROR("KMS: Failed to detect connector\n");
2722 
2723 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2724 			emulated_link_detect(aconnector->dc_link);
2725 		else
2726 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2727 
2728 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2729 			aconnector->fake_enable = false;
2730 
2731 		if (aconnector->dc_sink)
2732 			dc_sink_release(aconnector->dc_sink);
2733 		aconnector->dc_sink = NULL;
2734 		amdgpu_dm_update_connector_after_detect(aconnector);
2735 		mutex_unlock(&aconnector->hpd_lock);
2736 	}
2737 	drm_connector_list_iter_end(&iter);
2738 
2739 	/* Force mode set in atomic commit */
2740 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2741 		new_crtc_state->active_changed = true;
2742 
2743 	/*
2744 	 * atomic_check is expected to create the dc states. We need to release
2745 	 * them here, since they were duplicated as part of the suspend
2746 	 * procedure.
2747 	 */
2748 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2749 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2750 		if (dm_new_crtc_state->stream) {
2751 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2752 			dc_stream_release(dm_new_crtc_state->stream);
2753 			dm_new_crtc_state->stream = NULL;
2754 		}
2755 	}
2756 
2757 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2758 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2759 		if (dm_new_plane_state->dc_state) {
2760 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2761 			dc_plane_state_release(dm_new_plane_state->dc_state);
2762 			dm_new_plane_state->dc_state = NULL;
2763 		}
2764 	}
2765 
2766 	drm_atomic_helper_resume(ddev, dm->cached_state);
2767 
2768 	dm->cached_state = NULL;
2769 
2770 	amdgpu_dm_irq_resume_late(adev);
2771 
2772 	amdgpu_dm_smu_write_watermarks_table(adev);
2773 
2774 	return 0;
2775 }
2776 
2777 /**
2778  * DOC: DM Lifecycle
2779  *
2780  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2781  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2782  * the base driver's device list to be initialized and torn down accordingly.
2783  *
2784  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2785  */
2786 
2787 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2788 	.name = "dm",
2789 	.early_init = dm_early_init,
2790 	.late_init = dm_late_init,
2791 	.sw_init = dm_sw_init,
2792 	.sw_fini = dm_sw_fini,
2793 	.early_fini = amdgpu_dm_early_fini,
2794 	.hw_init = dm_hw_init,
2795 	.hw_fini = dm_hw_fini,
2796 	.suspend = dm_suspend,
2797 	.resume = dm_resume,
2798 	.is_idle = dm_is_idle,
2799 	.wait_for_idle = dm_wait_for_idle,
2800 	.check_soft_reset = dm_check_soft_reset,
2801 	.soft_reset = dm_soft_reset,
2802 	.set_clockgating_state = dm_set_clockgating_state,
2803 	.set_powergating_state = dm_set_powergating_state,
2804 };
2805 
2806 const struct amdgpu_ip_block_version dm_ip_block =
2807 {
2808 	.type = AMD_IP_BLOCK_TYPE_DCE,
2809 	.major = 1,
2810 	.minor = 0,
2811 	.rev = 0,
2812 	.funcs = &amdgpu_dm_funcs,
2813 };
2814 
2815 
2816 /**
2817  * DOC: atomic
2818  *
2819  * *WIP*
2820  */
2821 
2822 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2823 	.fb_create = amdgpu_display_user_framebuffer_create,
2824 	.get_format_info = amd_get_format_info,
2825 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2826 	.atomic_check = amdgpu_dm_atomic_check,
2827 	.atomic_commit = drm_atomic_helper_commit,
2828 };
2829 
2830 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2831 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2832 };
2833 
2834 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2835 {
2836 	u32 max_cll, min_cll, max, min, q, r;
2837 	struct amdgpu_dm_backlight_caps *caps;
2838 	struct amdgpu_display_manager *dm;
2839 	struct drm_connector *conn_base;
2840 	struct amdgpu_device *adev;
2841 	struct dc_link *link = NULL;
2842 	static const u8 pre_computed_values[] = {
2843 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2844 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2845 	int i;
2846 
2847 	if (!aconnector || !aconnector->dc_link)
2848 		return;
2849 
2850 	link = aconnector->dc_link;
2851 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2852 		return;
2853 
2854 	conn_base = &aconnector->base;
2855 	adev = drm_to_adev(conn_base->dev);
2856 	dm = &adev->dm;
2857 	for (i = 0; i < dm->num_of_edps; i++) {
2858 		if (link == dm->backlight_link[i])
2859 			break;
2860 	}
2861 	if (i >= dm->num_of_edps)
2862 		return;
2863 	caps = &dm->backlight_caps[i];
2864 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2865 	caps->aux_support = false;
2866 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2867 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2868 
2869 	if (caps->ext_caps->bits.oled == 1 /*||
2870 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2871 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2872 		caps->aux_support = true;
2873 
2874 	if (amdgpu_backlight == 0)
2875 		caps->aux_support = false;
2876 	else if (amdgpu_backlight == 1)
2877 		caps->aux_support = true;
2878 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression directly would require floating point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as: CV = 32*q + r. Replacing CV in
	 * the luminance expression gives 50*(2**q)*(2**(r/32)), so we only
	 * need to pre-compute the values of 50*(2**(r/32)). The table was
	 * generated with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results can be verified against pre_computed_values.
	 */
2894 	q = max_cll >> 5;
2895 	r = max_cll % 32;
2896 	max = (1 << q) * pre_computed_values[r];
2897 
2898 	// min luminance: maxLum * (CV/255)^2 / 100
2899 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2900 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2901 
2902 	caps->aux_max_input_signal = max;
2903 	caps->aux_min_input_signal = min;
2904 }
2905 
2906 void amdgpu_dm_update_connector_after_detect(
2907 		struct amdgpu_dm_connector *aconnector)
2908 {
2909 	struct drm_connector *connector = &aconnector->base;
2910 	struct drm_device *dev = connector->dev;
2911 	struct dc_sink *sink;
2912 
2913 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2915 		return;
2916 
2917 	sink = aconnector->dc_link->local_sink;
2918 	if (sink)
2919 		dc_sink_retain(sink);
2920 
2921 	/*
2922 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2923 	 * the connector sink is set to either fake or physical sink depends on link status.
2924 	 * Skip if already done during boot.
2925 	 */
2926 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2927 			&& aconnector->dc_em_sink) {
2928 
2929 		/*
2930 		 * For S3 resume with headless use eml_sink to fake stream
2931 		 * because on resume connector->sink is set to NULL
2932 		 */
2933 		mutex_lock(&dev->mode_config.mutex);
2934 
2935 		if (sink) {
2936 			if (aconnector->dc_sink) {
2937 				amdgpu_dm_update_freesync_caps(connector, NULL);
2938 				/*
2939 				 * retain and release below are used to
2940 				 * bump up refcount for sink because the link doesn't point
2941 				 * to it anymore after disconnect, so on next crtc to connector
2942 				 * reshuffle by UMD we will get into unwanted dc_sink release
2943 				 */
2944 				dc_sink_release(aconnector->dc_sink);
2945 			}
2946 			aconnector->dc_sink = sink;
2947 			dc_sink_retain(aconnector->dc_sink);
2948 			amdgpu_dm_update_freesync_caps(connector,
2949 					aconnector->edid);
2950 		} else {
2951 			amdgpu_dm_update_freesync_caps(connector, NULL);
2952 			if (!aconnector->dc_sink) {
2953 				aconnector->dc_sink = aconnector->dc_em_sink;
2954 				dc_sink_retain(aconnector->dc_sink);
2955 			}
2956 		}
2957 
2958 		mutex_unlock(&dev->mode_config.mutex);
2959 
2960 		if (sink)
2961 			dc_sink_release(sink);
2962 		return;
2963 	}
2964 
2965 	/*
2966 	 * TODO: temporary guard to look for proper fix
2967 	 * if this sink is MST sink, we should not do anything
2968 	 */
2969 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2970 		dc_sink_release(sink);
2971 		return;
2972 	}
2973 
2974 	if (aconnector->dc_sink == sink) {
2975 		/*
2976 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2977 		 * Do nothing!!
2978 		 */
2979 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2980 				aconnector->connector_id);
2981 		if (sink)
2982 			dc_sink_release(sink);
2983 		return;
2984 	}
2985 
2986 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2987 		aconnector->connector_id, aconnector->dc_sink, sink);
2988 
2989 	mutex_lock(&dev->mode_config.mutex);
2990 
2991 	/*
2992 	 * 1. Update status of the drm connector
2993 	 * 2. Send an event and let userspace tell us what to do
2994 	 */
2995 	if (sink) {
2996 		/*
2997 		 * TODO: check if we still need the S3 mode update workaround.
2998 		 * If yes, put it here.
2999 		 */
3000 		if (aconnector->dc_sink) {
3001 			amdgpu_dm_update_freesync_caps(connector, NULL);
3002 			dc_sink_release(aconnector->dc_sink);
3003 		}
3004 
3005 		aconnector->dc_sink = sink;
3006 		dc_sink_retain(aconnector->dc_sink);
3007 		if (sink->dc_edid.length == 0) {
3008 			aconnector->edid = NULL;
3009 			if (aconnector->dc_link->aux_mode) {
3010 				drm_dp_cec_unset_edid(
3011 					&aconnector->dm_dp_aux.aux);
3012 			}
3013 		} else {
3014 			aconnector->edid =
3015 				(struct edid *)sink->dc_edid.raw_edid;
3016 
3017 			if (aconnector->dc_link->aux_mode)
3018 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3019 						    aconnector->edid);
3020 		}
3021 
3022 		drm_connector_update_edid_property(connector, aconnector->edid);
3023 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3024 		update_connector_ext_caps(aconnector);
3025 	} else {
3026 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3027 		amdgpu_dm_update_freesync_caps(connector, NULL);
3028 		drm_connector_update_edid_property(connector, NULL);
3029 		aconnector->num_modes = 0;
3030 		dc_sink_release(aconnector->dc_sink);
3031 		aconnector->dc_sink = NULL;
3032 		aconnector->edid = NULL;
3033 #ifdef CONFIG_DRM_AMD_DC_HDCP
3034 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3035 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3036 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3037 #endif
3038 	}
3039 
3040 	mutex_unlock(&dev->mode_config.mutex);
3041 
3042 	update_subconnector_property(aconnector);
3043 
3044 	if (sink)
3045 		dc_sink_release(sink);
3046 }
3047 
3048 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3049 {
3050 	struct drm_connector *connector = &aconnector->base;
3051 	struct drm_device *dev = connector->dev;
3052 	enum dc_connection_type new_connection_type = dc_connection_none;
3053 	struct amdgpu_device *adev = drm_to_adev(dev);
3054 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3055 	struct dm_crtc_state *dm_crtc_state = NULL;
3056 
3057 	if (adev->dm.disable_hpd_irq)
3058 		return;
3059 
3060 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3061 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3062 					dm_con_state->base.state,
3063 					dm_con_state->base.crtc));
3064 	/*
3065 	 * In case of failure or MST no need to update connector status or notify the OS
3066 	 * since (for MST case) MST does this in its own context.
3067 	 */
3068 	mutex_lock(&aconnector->hpd_lock);
3069 
3070 #ifdef CONFIG_DRM_AMD_DC_HDCP
3071 	if (adev->dm.hdcp_workqueue) {
3072 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3073 		dm_con_state->update_hdcp = true;
3074 	}
3075 #endif
3076 	if (aconnector->fake_enable)
3077 		aconnector->fake_enable = false;
3078 
3079 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3080 		DRM_ERROR("KMS: Failed to detect connector\n");
3081 
3082 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3083 		emulated_link_detect(aconnector->dc_link);
3084 
3085 		drm_modeset_lock_all(dev);
3086 		dm_restore_drm_connector_state(dev, connector);
3087 		drm_modeset_unlock_all(dev);
3088 
3089 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3090 			drm_kms_helper_connector_hotplug_event(connector);
3091 
3092 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3093 		if (new_connection_type == dc_connection_none &&
3094 		    aconnector->dc_link->type == dc_connection_none &&
3095 		    dm_crtc_state)
3096 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3097 
3098 		amdgpu_dm_update_connector_after_detect(aconnector);
3099 
3100 		drm_modeset_lock_all(dev);
3101 		dm_restore_drm_connector_state(dev, connector);
3102 		drm_modeset_unlock_all(dev);
3103 
3104 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3105 			drm_kms_helper_connector_hotplug_event(connector);
3106 	}
3107 	mutex_unlock(&aconnector->hpd_lock);
3108 
3109 }
3110 
3111 static void handle_hpd_irq(void *param)
3112 {
3113 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3114 
3115 	handle_hpd_irq_helper(aconnector);
3116 
3117 }
3118 
3119 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3120 {
3121 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3122 	uint8_t dret;
3123 	bool new_irq_handled = false;
3124 	int dpcd_addr;
3125 	int dpcd_bytes_to_read;
3126 
3127 	const int max_process_count = 30;
3128 	int process_count = 0;
3129 
3130 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3131 
3132 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3133 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3134 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3135 		dpcd_addr = DP_SINK_COUNT;
3136 	} else {
3137 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3138 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3139 		dpcd_addr = DP_SINK_COUNT_ESI;
3140 	}
3141 
3142 	dret = drm_dp_dpcd_read(
3143 		&aconnector->dm_dp_aux.aux,
3144 		dpcd_addr,
3145 		esi,
3146 		dpcd_bytes_to_read);
3147 
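	/*
	 * Keep servicing ESI reads while the MST manager reports that it
	 * handled a new IRQ: ACK the serviced bits back to the sink, then
	 * re-read the ESI, bounded by max_process_count iterations.
	 */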
3148 	while (dret == dpcd_bytes_to_read &&
3149 		process_count < max_process_count) {
3150 		uint8_t retry;
3151 		dret = 0;
3152 
3153 		process_count++;
3154 
3155 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3156 		/* handle HPD short pulse irq */
3157 		if (aconnector->mst_mgr.mst_state)
3158 			drm_dp_mst_hpd_irq(
3159 				&aconnector->mst_mgr,
3160 				esi,
3161 				&new_irq_handled);
3162 
3163 		if (new_irq_handled) {
3164 			/* ACK at DPCD to notify down stream */
3165 			const int ack_dpcd_bytes_to_write =
3166 				dpcd_bytes_to_read - 1;
3167 
3168 			for (retry = 0; retry < 3; retry++) {
3169 				uint8_t wret;
3170 
3171 				wret = drm_dp_dpcd_write(
3172 					&aconnector->dm_dp_aux.aux,
3173 					dpcd_addr + 1,
3174 					&esi[1],
3175 					ack_dpcd_bytes_to_write);
3176 				if (wret == ack_dpcd_bytes_to_write)
3177 					break;
3178 			}
3179 
3180 			/* check if there is new irq to be handled */
3181 			dret = drm_dp_dpcd_read(
3182 				&aconnector->dm_dp_aux.aux,
3183 				dpcd_addr,
3184 				esi,
3185 				dpcd_bytes_to_read);
3186 
3187 			new_irq_handled = false;
3188 		} else {
3189 			break;
3190 		}
3191 	}
3192 
3193 	if (process_count == max_process_count)
3194 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3195 }
3196 
3197 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3198 							union hpd_irq_data hpd_irq_data)
3199 {
3200 	struct hpd_rx_irq_offload_work *offload_work =
3201 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3202 
3203 	if (!offload_work) {
3204 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3205 		return;
3206 	}
3207 
3208 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3209 	offload_work->data = hpd_irq_data;
3210 	offload_work->offload_wq = offload_wq;
3211 
3212 	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3214 }
3215 
3216 static void handle_hpd_rx_irq(void *param)
3217 {
3218 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3219 	struct drm_connector *connector = &aconnector->base;
3220 	struct drm_device *dev = connector->dev;
3221 	struct dc_link *dc_link = aconnector->dc_link;
3222 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3223 	bool result = false;
3224 	enum dc_connection_type new_connection_type = dc_connection_none;
3225 	struct amdgpu_device *adev = drm_to_adev(dev);
3226 	union hpd_irq_data hpd_irq_data;
3227 	bool link_loss = false;
3228 	bool has_left_work = false;
3229 	int idx = aconnector->base.index;
3230 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3231 
3232 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3233 
3234 	if (adev->dm.disable_hpd_irq)
3235 		return;
3236 
3237 	/*
3238 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3239 	 * conflict, after implement i2c helper, this mutex should be
3240 	 * retired.
3241 	 */
3242 	mutex_lock(&aconnector->hpd_lock);
3243 
3244 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3245 						&link_loss, true, &has_left_work);
3246 
3247 	if (!has_left_work)
3248 		goto out;
3249 
3250 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3251 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3252 		goto out;
3253 	}
3254 
3255 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3256 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3257 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3258 			dm_handle_mst_sideband_msg(aconnector);
3259 			goto out;
3260 		}
3261 
3262 		if (link_loss) {
3263 			bool skip = false;
3264 
3265 			spin_lock(&offload_wq->offload_lock);
3266 			skip = offload_wq->is_handling_link_loss;
3267 
3268 			if (!skip)
3269 				offload_wq->is_handling_link_loss = true;
3270 
3271 			spin_unlock(&offload_wq->offload_lock);
3272 
3273 			if (!skip)
3274 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3275 
3276 			goto out;
3277 		}
3278 	}
3279 
3280 out:
3281 	if (result && !is_mst_root_connector) {
3282 		/* Downstream Port status changed. */
3283 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3284 			DRM_ERROR("KMS: Failed to detect connector\n");
3285 
3286 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3287 			emulated_link_detect(dc_link);
3288 
3289 			if (aconnector->fake_enable)
3290 				aconnector->fake_enable = false;
3291 
3292 			amdgpu_dm_update_connector_after_detect(aconnector);
3293 
3294 
3295 			drm_modeset_lock_all(dev);
3296 			dm_restore_drm_connector_state(dev, connector);
3297 			drm_modeset_unlock_all(dev);
3298 
3299 			drm_kms_helper_connector_hotplug_event(connector);
3300 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3301 
3302 			if (aconnector->fake_enable)
3303 				aconnector->fake_enable = false;
3304 
3305 			amdgpu_dm_update_connector_after_detect(aconnector);
3306 
3307 
3308 			drm_modeset_lock_all(dev);
3309 			dm_restore_drm_connector_state(dev, connector);
3310 			drm_modeset_unlock_all(dev);
3311 
3312 			drm_kms_helper_connector_hotplug_event(connector);
3313 		}
3314 	}
3315 #ifdef CONFIG_DRM_AMD_DC_HDCP
3316 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3317 		if (adev->dm.hdcp_workqueue)
3318 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3319 	}
3320 #endif
3321 
3322 	if (dc_link->type != dc_connection_mst_branch)
3323 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3324 
3325 	mutex_unlock(&aconnector->hpd_lock);
3326 }
3327 
3328 static void register_hpd_handlers(struct amdgpu_device *adev)
3329 {
3330 	struct drm_device *dev = adev_to_drm(adev);
3331 	struct drm_connector *connector;
3332 	struct amdgpu_dm_connector *aconnector;
3333 	const struct dc_link *dc_link;
3334 	struct dc_interrupt_params int_params = {0};
3335 
3336 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3337 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3338 
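	/*
	 * For every connector, hook the DC hpd and hpd_rx (DP short pulse)
	 * interrupt sources up to handle_hpd_irq() and handle_hpd_rx_irq().
	 */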
3339 	list_for_each_entry(connector,
3340 			&dev->mode_config.connector_list, head)	{
3341 
3342 		aconnector = to_amdgpu_dm_connector(connector);
3343 		dc_link = aconnector->dc_link;
3344 
3345 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3346 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3347 			int_params.irq_source = dc_link->irq_source_hpd;
3348 
3349 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3350 					handle_hpd_irq,
3351 					(void *) aconnector);
3352 		}
3353 
3354 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3355 
3356 			/* Also register for DP short pulse (hpd_rx). */
3357 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3358 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3359 
3360 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3361 					handle_hpd_rx_irq,
3362 					(void *) aconnector);
3363 
3364 			if (adev->dm.hpd_rx_offload_wq)
3365 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3366 					aconnector;
3367 		}
3368 	}
3369 }
3370 
3371 #if defined(CONFIG_DRM_AMD_DC_SI)
3372 /* Register IRQ sources and initialize IRQ callbacks */
3373 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3374 {
3375 	struct dc *dc = adev->dm.dc;
3376 	struct common_irq_params *c_irq_params;
3377 	struct dc_interrupt_params int_params = {0};
3378 	int r;
3379 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3381 
3382 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3383 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3384 
3385 	/*
3386 	 * Actions of amdgpu_irq_add_id():
3387 	 * 1. Register a set() function with base driver.
3388 	 *    Base driver will call set() function to enable/disable an
3389 	 *    interrupt in DC hardware.
3390 	 * 2. Register amdgpu_dm_irq_handler().
3391 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3392 	 *    coming from DC hardware.
3393 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3394 	 *    for acknowledging and handling. */
3395 
3396 	/* Use VBLANK interrupt */
3397 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3398 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3399 		if (r) {
3400 			DRM_ERROR("Failed to add crtc irq id!\n");
3401 			return r;
3402 		}
3403 
3404 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3405 		int_params.irq_source =
3406 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3407 
3408 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3409 
3410 		c_irq_params->adev = adev;
3411 		c_irq_params->irq_src = int_params.irq_source;
3412 
3413 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3414 				dm_crtc_high_irq, c_irq_params);
3415 	}
3416 
3417 	/* Use GRPH_PFLIP interrupt */
3418 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3419 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3420 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3421 		if (r) {
3422 			DRM_ERROR("Failed to add page flip irq id!\n");
3423 			return r;
3424 		}
3425 
3426 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3427 		int_params.irq_source =
3428 			dc_interrupt_to_irq_source(dc, i, 0);
3429 
3430 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3431 
3432 		c_irq_params->adev = adev;
3433 		c_irq_params->irq_src = int_params.irq_source;
3434 
3435 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3436 				dm_pflip_high_irq, c_irq_params);
3437 
3438 	}
3439 
3440 	/* HPD */
3441 	r = amdgpu_irq_add_id(adev, client_id,
3442 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3443 	if (r) {
3444 		DRM_ERROR("Failed to add hpd irq id!\n");
3445 		return r;
3446 	}
3447 
3448 	register_hpd_handlers(adev);
3449 
3450 	return 0;
3451 }
3452 #endif
3453 
3454 /* Register IRQ sources and initialize IRQ callbacks */
3455 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3456 {
3457 	struct dc *dc = adev->dm.dc;
3458 	struct common_irq_params *c_irq_params;
3459 	struct dc_interrupt_params int_params = {0};
3460 	int r;
3461 	int i;
3462 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3463 
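	/* Vega (FAMILY_AI) and newer route DCE interrupts through the SOC15 IH client. */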
3464 	if (adev->family >= AMDGPU_FAMILY_AI)
3465 		client_id = SOC15_IH_CLIENTID_DCE;
3466 
3467 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3468 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3469 
3470 	/*
3471 	 * Actions of amdgpu_irq_add_id():
3472 	 * 1. Register a set() function with base driver.
3473 	 *    Base driver will call set() function to enable/disable an
3474 	 *    interrupt in DC hardware.
3475 	 * 2. Register amdgpu_dm_irq_handler().
3476 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3477 	 *    coming from DC hardware.
3478 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3479 	 *    for acknowledging and handling. */
3480 
3481 	/* Use VBLANK interrupt */
3482 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3483 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3484 		if (r) {
3485 			DRM_ERROR("Failed to add crtc irq id!\n");
3486 			return r;
3487 		}
3488 
3489 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3490 		int_params.irq_source =
3491 			dc_interrupt_to_irq_source(dc, i, 0);
3492 
3493 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3494 
3495 		c_irq_params->adev = adev;
3496 		c_irq_params->irq_src = int_params.irq_source;
3497 
3498 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3499 				dm_crtc_high_irq, c_irq_params);
3500 	}
3501 
3502 	/* Use VUPDATE interrupt */
3503 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3504 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3505 		if (r) {
3506 			DRM_ERROR("Failed to add vupdate irq id!\n");
3507 			return r;
3508 		}
3509 
3510 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3511 		int_params.irq_source =
3512 			dc_interrupt_to_irq_source(dc, i, 0);
3513 
3514 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3515 
3516 		c_irq_params->adev = adev;
3517 		c_irq_params->irq_src = int_params.irq_source;
3518 
3519 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3520 				dm_vupdate_high_irq, c_irq_params);
3521 	}
3522 
3523 	/* Use GRPH_PFLIP interrupt */
3524 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3525 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3526 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3527 		if (r) {
3528 			DRM_ERROR("Failed to add page flip irq id!\n");
3529 			return r;
3530 		}
3531 
3532 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3533 		int_params.irq_source =
3534 			dc_interrupt_to_irq_source(dc, i, 0);
3535 
3536 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3537 
3538 		c_irq_params->adev = adev;
3539 		c_irq_params->irq_src = int_params.irq_source;
3540 
3541 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3542 				dm_pflip_high_irq, c_irq_params);
3543 
3544 	}
3545 
3546 	/* HPD */
3547 	r = amdgpu_irq_add_id(adev, client_id,
3548 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3549 	if (r) {
3550 		DRM_ERROR("Failed to add hpd irq id!\n");
3551 		return r;
3552 	}
3553 
3554 	register_hpd_handlers(adev);
3555 
3556 	return 0;
3557 }
3558 
3559 /* Register IRQ sources and initialize IRQ callbacks */
3560 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3561 {
3562 	struct dc *dc = adev->dm.dc;
3563 	struct common_irq_params *c_irq_params;
3564 	struct dc_interrupt_params int_params = {0};
3565 	int r;
3566 	int i;
3567 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3568 	static const unsigned int vrtl_int_srcid[] = {
3569 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3570 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3571 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3572 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3573 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3574 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3575 	};
3576 #endif
3577 
3578 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3579 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3580 
3581 	/*
3582 	 * Actions of amdgpu_irq_add_id():
3583 	 * 1. Register a set() function with base driver.
3584 	 *    Base driver will call set() function to enable/disable an
3585 	 *    interrupt in DC hardware.
3586 	 * 2. Register amdgpu_dm_irq_handler().
3587 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3588 	 *    coming from DC hardware.
3589 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3590 	 *    for acknowledging and handling.
3591 	 */
3592 
3593 	/* Use VSTARTUP interrupt */
3594 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3595 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3596 			i++) {
3597 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3598 
3599 		if (r) {
3600 			DRM_ERROR("Failed to add crtc irq id!\n");
3601 			return r;
3602 		}
3603 
3604 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3605 		int_params.irq_source =
3606 			dc_interrupt_to_irq_source(dc, i, 0);
3607 
3608 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3609 
3610 		c_irq_params->adev = adev;
3611 		c_irq_params->irq_src = int_params.irq_source;
3612 
3613 		amdgpu_dm_irq_register_interrupt(
3614 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3615 	}
3616 
3617 	/* Use otg vertical line interrupt */
3618 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3619 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3620 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3621 				vrtl_int_srcid[i], &adev->vline0_irq);
3622 
3623 		if (r) {
3624 			DRM_ERROR("Failed to add vline0 irq id!\n");
3625 			return r;
3626 		}
3627 
3628 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3629 		int_params.irq_source =
3630 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3631 
3632 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3633 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3634 			break;
3635 		}
3636 
3637 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3638 					- DC_IRQ_SOURCE_DC1_VLINE0];
3639 
3640 		c_irq_params->adev = adev;
3641 		c_irq_params->irq_src = int_params.irq_source;
3642 
3643 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3644 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3645 	}
3646 #endif
3647 
3648 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3649 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3650 	 * to trigger at end of each vblank, regardless of state of the lock,
3651 	 * matching DCE behaviour.
3652 	 */
3653 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3654 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3655 	     i++) {
3656 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3657 
3658 		if (r) {
3659 			DRM_ERROR("Failed to add vupdate irq id!\n");
3660 			return r;
3661 		}
3662 
3663 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3664 		int_params.irq_source =
3665 			dc_interrupt_to_irq_source(dc, i, 0);
3666 
3667 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3668 
3669 		c_irq_params->adev = adev;
3670 		c_irq_params->irq_src = int_params.irq_source;
3671 
3672 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3673 				dm_vupdate_high_irq, c_irq_params);
3674 	}
3675 
3676 	/* Use GRPH_PFLIP interrupt */
3677 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3678 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3679 			i++) {
3680 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3681 		if (r) {
3682 			DRM_ERROR("Failed to add page flip irq id!\n");
3683 			return r;
3684 		}
3685 
3686 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3687 		int_params.irq_source =
3688 			dc_interrupt_to_irq_source(dc, i, 0);
3689 
3690 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3691 
3692 		c_irq_params->adev = adev;
3693 		c_irq_params->irq_src = int_params.irq_source;
3694 
3695 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3696 				dm_pflip_high_irq, c_irq_params);
3697 
3698 	}
3699 
3700 	/* HPD */
3701 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3702 			&adev->hpd_irq);
3703 	if (r) {
3704 		DRM_ERROR("Failed to add hpd irq id!\n");
3705 		return r;
3706 	}
3707 
3708 	register_hpd_handlers(adev);
3709 
3710 	return 0;
3711 }

3712 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3713 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3714 {
3715 	struct dc *dc = adev->dm.dc;
3716 	struct common_irq_params *c_irq_params;
3717 	struct dc_interrupt_params int_params = {0};
3718 	int r, i;
3719 
3720 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3721 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3722 
3723 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3724 			&adev->dmub_outbox_irq);
3725 	if (r) {
3726 		DRM_ERROR("Failed to add outbox irq id!\n");
3727 		return r;
3728 	}
3729 
3730 	if (dc->ctx->dmub_srv) {
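		/* A DMUB service is present: route DMCUB outbox1 low-priority notifications through the DM IRQ layer. */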
3731 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3732 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3733 		int_params.irq_source =
3734 		dc_interrupt_to_irq_source(dc, i, 0);
3735 
3736 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3737 
3738 		c_irq_params->adev = adev;
3739 		c_irq_params->irq_src = int_params.irq_source;
3740 
3741 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3742 				dm_dmub_outbox1_low_irq, c_irq_params);
3743 	}
3744 
3745 	return 0;
3746 }
3747 
3748 /*
3749  * Acquires the lock for the atomic state object and returns
3750  * the new atomic state.
3751  *
3752  * This should only be called during atomic check.
3753  */
3754 int dm_atomic_get_state(struct drm_atomic_state *state,
3755 			struct dm_atomic_state **dm_state)
3756 {
3757 	struct drm_device *dev = state->dev;
3758 	struct amdgpu_device *adev = drm_to_adev(dev);
3759 	struct amdgpu_display_manager *dm = &adev->dm;
3760 	struct drm_private_state *priv_state;
3761 
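	/* The dm private state has already been acquired for this atomic check. */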
3762 	if (*dm_state)
3763 		return 0;
3764 
3765 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3766 	if (IS_ERR(priv_state))
3767 		return PTR_ERR(priv_state);
3768 
3769 	*dm_state = to_dm_atomic_state(priv_state);
3770 
3771 	return 0;
3772 }
3773 
3774 static struct dm_atomic_state *
3775 dm_atomic_get_new_state(struct drm_atomic_state *state)
3776 {
3777 	struct drm_device *dev = state->dev;
3778 	struct amdgpu_device *adev = drm_to_adev(dev);
3779 	struct amdgpu_display_manager *dm = &adev->dm;
3780 	struct drm_private_obj *obj;
3781 	struct drm_private_state *new_obj_state;
3782 	int i;
3783 
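	/* Return the dm private object's new state from this atomic state, if it is part of the commit. */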
3784 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3785 		if (obj->funcs == dm->atomic_obj.funcs)
3786 			return to_dm_atomic_state(new_obj_state);
3787 	}
3788 
3789 	return NULL;
3790 }
3791 
3792 static struct drm_private_state *
3793 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3794 {
3795 	struct dm_atomic_state *old_state, *new_state;
3796 
3797 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3798 	if (!new_state)
3799 		return NULL;
3800 
3801 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3802 
3803 	old_state = to_dm_atomic_state(obj->state);
3804 
3805 	if (old_state && old_state->context)
3806 		new_state->context = dc_copy_state(old_state->context);
3807 
3808 	if (!new_state->context) {
3809 		kfree(new_state);
3810 		return NULL;
3811 	}
3812 
3813 	return &new_state->base;
3814 }
3815 
3816 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3817 				    struct drm_private_state *state)
3818 {
3819 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3820 
3821 	if (dm_state && dm_state->context)
3822 		dc_release_state(dm_state->context);
3823 
3824 	kfree(dm_state);
3825 }
3826 
3827 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3828 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3829 	.atomic_destroy_state = dm_atomic_destroy_state,
3830 };
3831 
3832 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3833 {
3834 	struct dm_atomic_state *state;
3835 	int r;
3836 
3837 	adev->mode_info.mode_config_initialized = true;
3838 
3839 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3840 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3841 
3842 	adev_to_drm(adev)->mode_config.max_width = 16384;
3843 	adev_to_drm(adev)->mode_config.max_height = 16384;
3844 
3845 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3846 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3847 	/* indicates support for immediate flip */
3848 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3849 
3850 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3851 
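	/* Seed the dm private atomic object with a copy of DC's current resource state. */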
3852 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3853 	if (!state)
3854 		return -ENOMEM;
3855 
3856 	state->context = dc_create_state(adev->dm.dc);
3857 	if (!state->context) {
3858 		kfree(state);
3859 		return -ENOMEM;
3860 	}
3861 
3862 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3863 
3864 	drm_atomic_private_obj_init(adev_to_drm(adev),
3865 				    &adev->dm.atomic_obj,
3866 				    &state->base,
3867 				    &dm_atomic_state_funcs);
3868 
3869 	r = amdgpu_display_modeset_create_props(adev);
3870 	if (r) {
3871 		dc_release_state(state->context);
3872 		kfree(state);
3873 		return r;
3874 	}
3875 
3876 	r = amdgpu_dm_audio_init(adev);
3877 	if (r) {
3878 		dc_release_state(state->context);
3879 		kfree(state);
3880 		return r;
3881 	}
3882 
3883 	return 0;
3884 }
3885 
3886 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3887 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3888 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3889 
3890 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3891 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3892 
3893 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3894 					    int bl_idx)
3895 {
3896 #if defined(CONFIG_ACPI)
3897 	struct amdgpu_dm_backlight_caps caps;
3898 
3899 	memset(&caps, 0, sizeof(caps));
3900 
3901 	if (dm->backlight_caps[bl_idx].caps_valid)
3902 		return;
3903 
3904 	amdgpu_acpi_get_backlight_caps(&caps);
3905 	if (caps.caps_valid) {
3906 		dm->backlight_caps[bl_idx].caps_valid = true;
3907 		if (caps.aux_support)
3908 			return;
3909 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3910 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3911 	} else {
3912 		dm->backlight_caps[bl_idx].min_input_signal =
3913 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3914 		dm->backlight_caps[bl_idx].max_input_signal =
3915 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3916 	}
3917 #else
3918 	if (dm->backlight_caps[bl_idx].aux_support)
3919 		return;
3920 
3921 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3922 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3923 #endif
3924 }
3925 
3926 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3927 				unsigned *min, unsigned *max)
3928 {
3929 	if (!caps)
3930 		return 0;
3931 
3932 	if (caps->aux_support) {
3933 		// Firmware limits are in nits, DC API wants millinits.
3934 		*max = 1000 * caps->aux_max_input_signal;
3935 		*min = 1000 * caps->aux_min_input_signal;
3936 	} else {
3937 		// Firmware limits are 8-bit, PWM control is 16-bit.
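		// 0x101 * 0xFF == 0xFFFF, so the 8-bit firmware maximum expands to the full 16-bit PWM range.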
3938 		*max = 0x101 * caps->max_input_signal;
3939 		*min = 0x101 * caps->min_input_signal;
3940 	}
3941 	return 1;
3942 }
3943 
3944 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3945 					uint32_t brightness)
3946 {
3947 	unsigned min, max;
3948 
3949 	if (!get_brightness_range(caps, &min, &max))
3950 		return brightness;
3951 
3952 	// Rescale 0..255 to min..max
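	// e.g. assuming the default 8-bit limits (12..255) scaled by 0x101 to 3084..65535, a user value of 128 maps to 34432.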
3953 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3954 				       AMDGPU_MAX_BL_LEVEL);
3955 }
3956 
3957 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3958 				      uint32_t brightness)
3959 {
3960 	unsigned min, max;
3961 
3962 	if (!get_brightness_range(caps, &min, &max))
3963 		return brightness;
3964 
3965 	if (brightness < min)
3966 		return 0;
3967 	// Rescale min..max to 0..255
3968 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3969 				 max - min);
3970 }
3971 
3972 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3973 					 int bl_idx,
3974 					 u32 user_brightness)
3975 {
3976 	struct amdgpu_dm_backlight_caps caps;
3977 	struct dc_link *link;
3978 	u32 brightness;
3979 	bool rc;
3980 
3981 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3982 	caps = dm->backlight_caps[bl_idx];
3983 
3984 	dm->brightness[bl_idx] = user_brightness;
3985 	/* update scratch register */
3986 	if (bl_idx == 0)
3987 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3988 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3989 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3990 
3991 	/* Change brightness based on AUX property */
3992 	if (caps.aux_support) {
3993 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3994 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3995 		if (!rc)
3996 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3997 	} else {
3998 		rc = dc_link_set_backlight_level(link, brightness, 0);
3999 		if (!rc)
4000 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4001 	}
4002 
4003 	if (rc)
4004 		dm->actual_brightness[bl_idx] = user_brightness;
4005 }
4006 
4007 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4008 {
4009 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4010 	int i;
4011 
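	/* Match this backlight device to its eDP index; fall back to the first panel if no match is found. */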
4012 	for (i = 0; i < dm->num_of_edps; i++) {
4013 		if (bd == dm->backlight_dev[i])
4014 			break;
4015 	}
4016 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4017 		i = 0;
4018 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4019 
4020 	return 0;
4021 }
4022 
4023 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4024 					 int bl_idx)
4025 {
4026 	struct amdgpu_dm_backlight_caps caps;
4027 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4028 
4029 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4030 	caps = dm->backlight_caps[bl_idx];
4031 
4032 	if (caps.aux_support) {
4033 		u32 avg, peak;
4034 		bool rc;
4035 
4036 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4037 		if (!rc)
4038 			return dm->brightness[bl_idx];
4039 		return convert_brightness_to_user(&caps, avg);
4040 	} else {
4041 		int ret = dc_link_get_backlight_level(link);
4042 
4043 		if (ret == DC_ERROR_UNEXPECTED)
4044 			return dm->brightness[bl_idx];
4045 		return convert_brightness_to_user(&caps, ret);
4046 	}
4047 }
4048 
4049 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4050 {
4051 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4052 	int i;
4053 
4054 	for (i = 0; i < dm->num_of_edps; i++) {
4055 		if (bd == dm->backlight_dev[i])
4056 			break;
4057 	}
4058 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4059 		i = 0;
4060 	return amdgpu_dm_backlight_get_level(dm, i);
4061 }
4062 
4063 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4064 	.options = BL_CORE_SUSPENDRESUME,
4065 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4066 	.update_status	= amdgpu_dm_backlight_update_status,
4067 };
4068 
4069 static void
4070 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4071 {
4072 	char bl_name[16];
4073 	struct backlight_properties props = { 0 };
4074 
4075 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4076 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4077 
4078 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4079 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4080 	props.type = BACKLIGHT_RAW;
4081 
4082 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4083 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4084 
4085 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4086 								       adev_to_drm(dm->adev)->dev,
4087 								       dm,
4088 								       &amdgpu_dm_backlight_ops,
4089 								       &props);
4090 
4091 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4092 		DRM_ERROR("DM: Backlight registration failed!\n");
4093 	else
4094 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4095 }
4096 #endif
4097 
4098 static int initialize_plane(struct amdgpu_display_manager *dm,
4099 			    struct amdgpu_mode_info *mode_info, int plane_id,
4100 			    enum drm_plane_type plane_type,
4101 			    const struct dc_plane_cap *plane_cap)
4102 {
4103 	struct drm_plane *plane;
4104 	unsigned long possible_crtcs;
4105 	int ret = 0;
4106 
4107 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4108 	if (!plane) {
4109 		DRM_ERROR("KMS: Failed to allocate plane\n");
4110 		return -ENOMEM;
4111 	}
4112 	plane->type = plane_type;
4113 
4114 	/*
4115 	 * HACK: IGT tests expect that the primary plane for a CRTC
4116 	 * can only have one possible CRTC. Only expose support for
4117 	 * any CRTC if they're not going to be used as a primary plane
4118 	 * for a CRTC - like overlay or underlay planes.
4119 	 */
4120 	possible_crtcs = 1 << plane_id;
4121 	if (plane_id >= dm->dc->caps.max_streams)
4122 		possible_crtcs = 0xff;
4123 
4124 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4125 
4126 	if (ret) {
4127 		DRM_ERROR("KMS: Failed to initialize plane\n");
4128 		kfree(plane);
4129 		return ret;
4130 	}
4131 
4132 	if (mode_info)
4133 		mode_info->planes[plane_id] = plane;
4134 
4135 	return ret;
4136 }
4137 
4138 
4139 static void register_backlight_device(struct amdgpu_display_manager *dm,
4140 				      struct dc_link *link)
4141 {
4142 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4143 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4144 
4145 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4146 	    link->type != dc_connection_none) {
4147 		/*
4148 		 * Even if registration failed, we should continue with
4149 		 * DM initialization because not having a backlight control
4150 		 * is better than a black screen.
4151 		 */
4152 		if (!dm->backlight_dev[dm->num_of_edps])
4153 			amdgpu_dm_register_backlight_device(dm);
4154 
4155 		if (dm->backlight_dev[dm->num_of_edps]) {
4156 			dm->backlight_link[dm->num_of_edps] = link;
4157 			dm->num_of_edps++;
4158 		}
4159 	}
4160 #endif
4161 }
4162 
4163 
4164 /*
4165  * In this architecture, the association
4166  * connector -> encoder -> crtc
4167  * is not really required. The crtc and connector will hold the
4168  * display_index as an abstraction to use with the DAL component.
4169  *
4170  * Returns 0 on success
4171  */
4172 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4173 {
4174 	struct amdgpu_display_manager *dm = &adev->dm;
4175 	int32_t i;
4176 	struct amdgpu_dm_connector *aconnector = NULL;
4177 	struct amdgpu_encoder *aencoder = NULL;
4178 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4179 	uint32_t link_cnt;
4180 	int32_t primary_planes;
4181 	enum dc_connection_type new_connection_type = dc_connection_none;
4182 	const struct dc_plane_cap *plane;
4183 	bool psr_feature_enabled = false;
4184 
4185 	dm->display_indexes_num = dm->dc->caps.max_streams;
4186 	/* Update the actual number of CRTCs in use */
4187 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4188 
4189 	link_cnt = dm->dc->caps.max_links;
4190 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4191 		DRM_ERROR("DM: Failed to initialize mode config\n");
4192 		return -EINVAL;
4193 	}
4194 
4195 	/* There is one primary plane per CRTC */
4196 	primary_planes = dm->dc->caps.max_streams;
4197 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4198 
4199 	/*
4200 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4201 	 * Order is reversed to match iteration order in atomic check.
4202 	 */
4203 	for (i = (primary_planes - 1); i >= 0; i--) {
4204 		plane = &dm->dc->caps.planes[i];
4205 
4206 		if (initialize_plane(dm, mode_info, i,
4207 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4208 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4209 			goto fail;
4210 		}
4211 	}
4212 
4213 	/*
4214 	 * Initialize overlay planes, index starting after primary planes.
4215 	 * These planes have a higher DRM index than the primary planes since
4216 	 * they should be considered as having a higher z-order.
4217 	 * Order is reversed to match iteration order in atomic check.
4218 	 *
4219 	 * Only support DCN for now, and only expose one so we don't encourage
4220 	 * userspace to use up all the pipes.
4221 	 */
4222 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4223 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4224 
4225 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4226 			continue;
4227 
4228 		if (!plane->blends_with_above || !plane->blends_with_below)
4229 			continue;
4230 
4231 		if (!plane->pixel_format_support.argb8888)
4232 			continue;
4233 
4234 		if (initialize_plane(dm, NULL, primary_planes + i,
4235 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4236 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4237 			goto fail;
4238 		}
4239 
4240 		/* Only create one overlay plane. */
4241 		break;
4242 	}
4243 
4244 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4245 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4246 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4247 			goto fail;
4248 		}
4249 
4250 	/* Use Outbox interrupt */
4251 	switch (adev->ip_versions[DCE_HWIP][0]) {
4252 	case IP_VERSION(3, 0, 0):
4253 	case IP_VERSION(3, 1, 2):
4254 	case IP_VERSION(3, 1, 3):
4255 	case IP_VERSION(3, 1, 5):
4256 	case IP_VERSION(3, 1, 6):
4257 	case IP_VERSION(3, 2, 0):
4258 	case IP_VERSION(3, 2, 1):
4259 	case IP_VERSION(2, 1, 0):
4260 		if (register_outbox_irq_handlers(dm->adev)) {
4261 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4262 			goto fail;
4263 		}
4264 		break;
4265 	default:
4266 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4267 			      adev->ip_versions[DCE_HWIP][0]);
4268 	}
4269 
4270 	/* Determine whether to enable PSR support by default. */
4271 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4272 		switch (adev->ip_versions[DCE_HWIP][0]) {
4273 		case IP_VERSION(3, 1, 2):
4274 		case IP_VERSION(3, 1, 3):
4275 		case IP_VERSION(3, 1, 5):
4276 		case IP_VERSION(3, 1, 6):
4277 		case IP_VERSION(3, 2, 0):
4278 		case IP_VERSION(3, 2, 1):
4279 			psr_feature_enabled = true;
4280 			break;
4281 		default:
4282 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4283 			break;
4284 		}
4285 	}
4286 
4287 	/* Disable vblank IRQs aggressively for power-saving. */
4288 	adev_to_drm(adev)->vblank_disable_immediate = true;
4289 
4290 	/* loops over all connectors on the board */
4291 	for (i = 0; i < link_cnt; i++) {
4292 		struct dc_link *link = NULL;
4293 
4294 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4295 			DRM_ERROR(
4296 				"KMS: Cannot support more than %d display indexes\n",
4297 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4298 			continue;
4299 		}
4300 
4301 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4302 		if (!aconnector)
4303 			goto fail;
4304 
4305 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4306 		if (!aencoder)
4307 			goto fail;
4308 
4309 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4310 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4311 			goto fail;
4312 		}
4313 
4314 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4315 			DRM_ERROR("KMS: Failed to initialize connector\n");
4316 			goto fail;
4317 		}
4318 
4319 		link = dc_get_link_at_index(dm->dc, i);
4320 
4321 		if (!dc_link_detect_sink(link, &new_connection_type))
4322 			DRM_ERROR("KMS: Failed to detect connector\n");
4323 
4324 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4325 			emulated_link_detect(link);
4326 			amdgpu_dm_update_connector_after_detect(aconnector);
4327 
4328 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4329 			amdgpu_dm_update_connector_after_detect(aconnector);
4330 			register_backlight_device(dm, link);
4331 			if (dm->num_of_edps)
4332 				update_connector_ext_caps(aconnector);
4333 			if (psr_feature_enabled)
4334 				amdgpu_dm_set_psr_caps(link);
4335 
4336 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4337 			 * PSR is also supported.
4338 			 */
4339 			if (link->psr_settings.psr_feature_enabled)
4340 				adev_to_drm(adev)->vblank_disable_immediate = false;
4341 		}
4342 
4343 
4344 	}
4345 
4346 	/* Software is initialized. Now we can register interrupt handlers. */
4347 	switch (adev->asic_type) {
4348 #if defined(CONFIG_DRM_AMD_DC_SI)
4349 	case CHIP_TAHITI:
4350 	case CHIP_PITCAIRN:
4351 	case CHIP_VERDE:
4352 	case CHIP_OLAND:
4353 		if (dce60_register_irq_handlers(dm->adev)) {
4354 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4355 			goto fail;
4356 		}
4357 		break;
4358 #endif
4359 	case CHIP_BONAIRE:
4360 	case CHIP_HAWAII:
4361 	case CHIP_KAVERI:
4362 	case CHIP_KABINI:
4363 	case CHIP_MULLINS:
4364 	case CHIP_TONGA:
4365 	case CHIP_FIJI:
4366 	case CHIP_CARRIZO:
4367 	case CHIP_STONEY:
4368 	case CHIP_POLARIS11:
4369 	case CHIP_POLARIS10:
4370 	case CHIP_POLARIS12:
4371 	case CHIP_VEGAM:
4372 	case CHIP_VEGA10:
4373 	case CHIP_VEGA12:
4374 	case CHIP_VEGA20:
4375 		if (dce110_register_irq_handlers(dm->adev)) {
4376 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4377 			goto fail;
4378 		}
4379 		break;
4380 	default:
4381 		switch (adev->ip_versions[DCE_HWIP][0]) {
4382 		case IP_VERSION(1, 0, 0):
4383 		case IP_VERSION(1, 0, 1):
4384 		case IP_VERSION(2, 0, 2):
4385 		case IP_VERSION(2, 0, 3):
4386 		case IP_VERSION(2, 0, 0):
4387 		case IP_VERSION(2, 1, 0):
4388 		case IP_VERSION(3, 0, 0):
4389 		case IP_VERSION(3, 0, 2):
4390 		case IP_VERSION(3, 0, 3):
4391 		case IP_VERSION(3, 0, 1):
4392 		case IP_VERSION(3, 1, 2):
4393 		case IP_VERSION(3, 1, 3):
4394 		case IP_VERSION(3, 1, 5):
4395 		case IP_VERSION(3, 1, 6):
4396 		case IP_VERSION(3, 2, 0):
4397 		case IP_VERSION(3, 2, 1):
4398 			if (dcn10_register_irq_handlers(dm->adev)) {
4399 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4400 				goto fail;
4401 			}
4402 			break;
4403 		default:
4404 			DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4405 					adev->ip_versions[DCE_HWIP][0]);
4406 			goto fail;
4407 		}
4408 		break;
4409 	}
4410 
4411 	return 0;
4412 fail:
4413 	kfree(aencoder);
4414 	kfree(aconnector);
4415 
4416 	return -EINVAL;
4417 }
4418 
4419 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4420 {
4421 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4423 }
4424 
4425 /******************************************************************************
4426  * amdgpu_display_funcs functions
4427  *****************************************************************************/
4428 
4429 /*
4430  * dm_bandwidth_update - program display watermarks
4431  *
4432  * @adev: amdgpu_device pointer
4433  *
4434  * Calculate and program the display watermarks and line buffer allocation.
4435  */
4436 static void dm_bandwidth_update(struct amdgpu_device *adev)
4437 {
4438 	/* TODO: implement later */
4439 }
4440 
4441 static const struct amdgpu_display_funcs dm_display_funcs = {
4442 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4443 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4444 	.backlight_set_level = NULL, /* never called for DC */
4445 	.backlight_get_level = NULL, /* never called for DC */
4446 	.hpd_sense = NULL,/* called unconditionally */
4447 	.hpd_set_polarity = NULL, /* called unconditionally */
4448 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4449 	.page_flip_get_scanoutpos =
4450 		dm_crtc_get_scanoutpos,/* called unconditionally */
4451 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4452 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4453 };
4454 
4455 #if defined(CONFIG_DEBUG_KERNEL_DC)
4456 
4457 static ssize_t s3_debug_store(struct device *device,
4458 			      struct device_attribute *attr,
4459 			      const char *buf,
4460 			      size_t count)
4461 {
4462 	int ret;
4463 	int s3_state;
4464 	struct drm_device *drm_dev = dev_get_drvdata(device);
4465 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4466 
4467 	ret = kstrtoint(buf, 0, &s3_state);
4468 
4469 	if (ret == 0) {
4470 		if (s3_state) {
4471 			dm_resume(adev);
4472 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4473 		} else
4474 			dm_suspend(adev);
4475 	}
4476 
4477 	return ret == 0 ? count : 0;
4478 }
4479 
4480 DEVICE_ATTR_WO(s3_debug);
4481 
4482 #endif
4483 
4484 static int dm_early_init(void *handle)
4485 {
4486 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4487 
4488 	switch (adev->asic_type) {
4489 #if defined(CONFIG_DRM_AMD_DC_SI)
4490 	case CHIP_TAHITI:
4491 	case CHIP_PITCAIRN:
4492 	case CHIP_VERDE:
4493 		adev->mode_info.num_crtc = 6;
4494 		adev->mode_info.num_hpd = 6;
4495 		adev->mode_info.num_dig = 6;
4496 		break;
4497 	case CHIP_OLAND:
4498 		adev->mode_info.num_crtc = 2;
4499 		adev->mode_info.num_hpd = 2;
4500 		adev->mode_info.num_dig = 2;
4501 		break;
4502 #endif
4503 	case CHIP_BONAIRE:
4504 	case CHIP_HAWAII:
4505 		adev->mode_info.num_crtc = 6;
4506 		adev->mode_info.num_hpd = 6;
4507 		adev->mode_info.num_dig = 6;
4508 		break;
4509 	case CHIP_KAVERI:
4510 		adev->mode_info.num_crtc = 4;
4511 		adev->mode_info.num_hpd = 6;
4512 		adev->mode_info.num_dig = 7;
4513 		break;
4514 	case CHIP_KABINI:
4515 	case CHIP_MULLINS:
4516 		adev->mode_info.num_crtc = 2;
4517 		adev->mode_info.num_hpd = 6;
4518 		adev->mode_info.num_dig = 6;
4519 		break;
4520 	case CHIP_FIJI:
4521 	case CHIP_TONGA:
4522 		adev->mode_info.num_crtc = 6;
4523 		adev->mode_info.num_hpd = 6;
4524 		adev->mode_info.num_dig = 7;
4525 		break;
4526 	case CHIP_CARRIZO:
4527 		adev->mode_info.num_crtc = 3;
4528 		adev->mode_info.num_hpd = 6;
4529 		adev->mode_info.num_dig = 9;
4530 		break;
4531 	case CHIP_STONEY:
4532 		adev->mode_info.num_crtc = 2;
4533 		adev->mode_info.num_hpd = 6;
4534 		adev->mode_info.num_dig = 9;
4535 		break;
4536 	case CHIP_POLARIS11:
4537 	case CHIP_POLARIS12:
4538 		adev->mode_info.num_crtc = 5;
4539 		adev->mode_info.num_hpd = 5;
4540 		adev->mode_info.num_dig = 5;
4541 		break;
4542 	case CHIP_POLARIS10:
4543 	case CHIP_VEGAM:
4544 		adev->mode_info.num_crtc = 6;
4545 		adev->mode_info.num_hpd = 6;
4546 		adev->mode_info.num_dig = 6;
4547 		break;
4548 	case CHIP_VEGA10:
4549 	case CHIP_VEGA12:
4550 	case CHIP_VEGA20:
4551 		adev->mode_info.num_crtc = 6;
4552 		adev->mode_info.num_hpd = 6;
4553 		adev->mode_info.num_dig = 6;
4554 		break;
4555 	default:
4556 
4557 		switch (adev->ip_versions[DCE_HWIP][0]) {
4558 		case IP_VERSION(2, 0, 2):
4559 		case IP_VERSION(3, 0, 0):
4560 			adev->mode_info.num_crtc = 6;
4561 			adev->mode_info.num_hpd = 6;
4562 			adev->mode_info.num_dig = 6;
4563 			break;
4564 		case IP_VERSION(2, 0, 0):
4565 		case IP_VERSION(3, 0, 2):
4566 			adev->mode_info.num_crtc = 5;
4567 			adev->mode_info.num_hpd = 5;
4568 			adev->mode_info.num_dig = 5;
4569 			break;
4570 		case IP_VERSION(2, 0, 3):
4571 		case IP_VERSION(3, 0, 3):
4572 			adev->mode_info.num_crtc = 2;
4573 			adev->mode_info.num_hpd = 2;
4574 			adev->mode_info.num_dig = 2;
4575 			break;
4576 		case IP_VERSION(1, 0, 0):
4577 		case IP_VERSION(1, 0, 1):
4578 		case IP_VERSION(3, 0, 1):
4579 		case IP_VERSION(2, 1, 0):
4580 		case IP_VERSION(3, 1, 2):
4581 		case IP_VERSION(3, 1, 3):
4582 		case IP_VERSION(3, 1, 5):
4583 		case IP_VERSION(3, 1, 6):
4584 		case IP_VERSION(3, 2, 0):
4585 		case IP_VERSION(3, 2, 1):
4586 			adev->mode_info.num_crtc = 4;
4587 			adev->mode_info.num_hpd = 4;
4588 			adev->mode_info.num_dig = 4;
4589 			break;
4590 		default:
4591 			DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4592 					adev->ip_versions[DCE_HWIP][0]);
4593 			return -EINVAL;
4594 		}
4595 		break;
4596 	}
4597 
4598 	amdgpu_dm_set_irq_funcs(adev);
4599 
4600 	if (adev->mode_info.funcs == NULL)
4601 		adev->mode_info.funcs = &dm_display_funcs;
4602 
4603 	/*
4604 	 * Note: Do NOT change adev->audio_endpt_rreg and
4605 	 * adev->audio_endpt_wreg because they are initialised in
4606 	 * amdgpu_device_init()
4607 	 */
4608 #if defined(CONFIG_DEBUG_KERNEL_DC)
4609 	device_create_file(
4610 		adev_to_drm(adev)->dev,
4611 		&dev_attr_s3_debug);
4612 #endif
4613 
4614 	return 0;
4615 }
4616 
4617 static bool modeset_required(struct drm_crtc_state *crtc_state,
4618 			     struct dc_stream_state *new_stream,
4619 			     struct dc_stream_state *old_stream)
4620 {
4621 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4622 }
4623 
4624 static bool modereset_required(struct drm_crtc_state *crtc_state)
4625 {
4626 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4627 }
4628 
4629 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4630 {
4631 	drm_encoder_cleanup(encoder);
4632 	kfree(encoder);
4633 }
4634 
4635 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4636 	.destroy = amdgpu_dm_encoder_destroy,
4637 };
4638 
4639 
4640 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4641 					 struct drm_framebuffer *fb,
4642 					 int *min_downscale, int *max_upscale)
4643 {
4644 	struct amdgpu_device *adev = drm_to_adev(dev);
4645 	struct dc *dc = adev->dm.dc;
4646 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4647 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4648 
4649 	switch (fb->format->format) {
4650 	case DRM_FORMAT_P010:
4651 	case DRM_FORMAT_NV12:
4652 	case DRM_FORMAT_NV21:
4653 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4654 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4655 		break;
4656 
4657 	case DRM_FORMAT_XRGB16161616F:
4658 	case DRM_FORMAT_ARGB16161616F:
4659 	case DRM_FORMAT_XBGR16161616F:
4660 	case DRM_FORMAT_ABGR16161616F:
4661 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4662 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4663 		break;
4664 
4665 	default:
4666 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4667 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4668 		break;
4669 	}
4670 
4671 	/*
4672 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4673 	 * scaling factor of 1.0 == 1000 units.
4674 	 */
4675 	if (*max_upscale == 1)
4676 		*max_upscale = 1000;
4677 
4678 	if (*min_downscale == 1)
4679 		*min_downscale = 1000;
4680 }
4681 
4682 
4683 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4684 				const struct drm_plane_state *state,
4685 				struct dc_scaling_info *scaling_info)
4686 {
4687 	int scale_w, scale_h, min_downscale, max_upscale;
4688 
4689 	memset(scaling_info, 0, sizeof(*scaling_info));
4690 
4691 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4692 	scaling_info->src_rect.x = state->src_x >> 16;
4693 	scaling_info->src_rect.y = state->src_y >> 16;
4694 
4695 	/*
4696 	 * For reasons we don't (yet) fully understand a non-zero
4697 	 * src_y coordinate into an NV12 buffer can cause a
4698 	 * system hang on DCN1x.
4699 	 * To avoid hangs (and maybe be overly cautious)
4700 	 * let's reject both non-zero src_x and src_y.
4701 	 *
4702 	 * We currently know of only one use-case to reproduce a
4703 	 * scenario with non-zero src_x and src_y for NV12, which
4704 	 * is to gesture the YouTube Android app into full screen
4705 	 * on ChromeOS.
4706 	 */
4707 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4708 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4709 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4710 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4711 		return -EINVAL;
4712 
4713 	scaling_info->src_rect.width = state->src_w >> 16;
4714 	if (scaling_info->src_rect.width == 0)
4715 		return -EINVAL;
4716 
4717 	scaling_info->src_rect.height = state->src_h >> 16;
4718 	if (scaling_info->src_rect.height == 0)
4719 		return -EINVAL;
4720 
4721 	scaling_info->dst_rect.x = state->crtc_x;
4722 	scaling_info->dst_rect.y = state->crtc_y;
4723 
4724 	if (state->crtc_w == 0)
4725 		return -EINVAL;
4726 
4727 	scaling_info->dst_rect.width = state->crtc_w;
4728 
4729 	if (state->crtc_h == 0)
4730 		return -EINVAL;
4731 
4732 	scaling_info->dst_rect.height = state->crtc_h;
4733 
4734 	/* DRM doesn't specify clipping on destination output. */
4735 	scaling_info->clip_rect = scaling_info->dst_rect;
4736 
4737 	/* Validate scaling per-format with DC plane caps */
4738 	if (state->plane && state->plane->dev && state->fb) {
4739 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4740 					     &min_downscale, &max_upscale);
4741 	} else {
4742 		min_downscale = 250;
4743 		max_upscale = 16000;
4744 	}
4745 
4746 	scale_w = scaling_info->dst_rect.width * 1000 /
4747 		  scaling_info->src_rect.width;
4748 
4749 	if (scale_w < min_downscale || scale_w > max_upscale)
4750 		return -EINVAL;
4751 
4752 	scale_h = scaling_info->dst_rect.height * 1000 /
4753 		  scaling_info->src_rect.height;
4754 
4755 	if (scale_h < min_downscale || scale_h > max_upscale)
4756 		return -EINVAL;
4757 
4758 	/*
4759 	 * The "scaling_quality" can be ignored for now; quality = 0 lets DC
4760 	 * assume reasonable defaults based on the format.
4761 	 */
4762 
4763 	return 0;
4764 }
4765 
4766 static void
4767 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4768 				 uint64_t tiling_flags)
4769 {
4770 	/* Fill GFX8 params */
4771 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4772 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4773 
4774 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4775 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4776 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4777 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4778 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4779 
4780 		/* XXX fix me for VI */
4781 		tiling_info->gfx8.num_banks = num_banks;
4782 		tiling_info->gfx8.array_mode =
4783 				DC_ARRAY_2D_TILED_THIN1;
4784 		tiling_info->gfx8.tile_split = tile_split;
4785 		tiling_info->gfx8.bank_width = bankw;
4786 		tiling_info->gfx8.bank_height = bankh;
4787 		tiling_info->gfx8.tile_aspect = mtaspect;
4788 		tiling_info->gfx8.tile_mode =
4789 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4790 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4791 			== DC_ARRAY_1D_TILED_THIN1) {
4792 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4793 	}
4794 
4795 	tiling_info->gfx8.pipe_config =
4796 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4797 }
4798 
4799 static void
4800 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4801 				  union dc_tiling_info *tiling_info)
4802 {
4803 	tiling_info->gfx9.num_pipes =
4804 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4805 	tiling_info->gfx9.num_banks =
4806 		adev->gfx.config.gb_addr_config_fields.num_banks;
4807 	tiling_info->gfx9.pipe_interleave =
4808 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4809 	tiling_info->gfx9.num_shader_engines =
4810 		adev->gfx.config.gb_addr_config_fields.num_se;
4811 	tiling_info->gfx9.max_compressed_frags =
4812 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4813 	tiling_info->gfx9.num_rb_per_se =
4814 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4815 	tiling_info->gfx9.shaderEnable = 1;
4816 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4817 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4818 }
4819 
4820 static int
4821 validate_dcc(struct amdgpu_device *adev,
4822 	     const enum surface_pixel_format format,
4823 	     const enum dc_rotation_angle rotation,
4824 	     const union dc_tiling_info *tiling_info,
4825 	     const struct dc_plane_dcc_param *dcc,
4826 	     const struct dc_plane_address *address,
4827 	     const struct plane_size *plane_size)
4828 {
4829 	struct dc *dc = adev->dm.dc;
4830 	struct dc_dcc_surface_param input;
4831 	struct dc_surface_dcc_cap output;
4832 
4833 	memset(&input, 0, sizeof(input));
4834 	memset(&output, 0, sizeof(output));
4835 
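	/* Nothing to validate when DCC is not enabled for the surface. */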
4836 	if (!dcc->enable)
4837 		return 0;
4838 
4839 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4840 	    !dc->cap_funcs.get_dcc_compression_cap)
4841 		return -EINVAL;
4842 
4843 	input.format = format;
4844 	input.surface_size.width = plane_size->surface_size.width;
4845 	input.surface_size.height = plane_size->surface_size.height;
4846 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4847 
4848 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4849 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4850 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4851 		input.scan = SCAN_DIRECTION_VERTICAL;
4852 
4853 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4854 		return -EINVAL;
4855 
4856 	if (!output.capable)
4857 		return -EINVAL;
4858 
4859 	if (dcc->independent_64b_blks == 0 &&
4860 	    output.grph.rgb.independent_64b_blks != 0)
4861 		return -EINVAL;
4862 
4863 	return 0;
4864 }
4865 
4866 static bool
4867 modifier_has_dcc(uint64_t modifier)
4868 {
4869 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4870 }
4871 
4872 static unsigned
4873 modifier_gfx9_swizzle_mode(uint64_t modifier)
4874 {
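	/*
	 * LINEAR carries no swizzle information; for AMD format modifiers the
	 * TILE field holds the GFX9+ swizzle mode.
	 */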
4875 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4876 		return 0;
4877 
4878 	return AMD_FMT_MOD_GET(TILE, modifier);
4879 }
4880 
4881 static const struct drm_format_info *
4882 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4883 {
4884 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4885 }
4886 
4887 static void
4888 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4889 				    union dc_tiling_info *tiling_info,
4890 				    uint64_t modifier)
4891 {
4892 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4893 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4894 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4895 	unsigned int pipes_log2;
4896 
4897 	pipes_log2 = min(5u, mod_pipe_xor_bits);
4898 
4899 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4900 
4901 	if (!IS_AMD_FMT_MOD(modifier))
4902 		return;
4903 
4904 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4905 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4906 
4907 	if (adev->family >= AMDGPU_FAMILY_NV) {
4908 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4909 	} else {
4910 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4911 
4912 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4913 	}
4914 }
4915 
4916 enum dm_micro_swizzle {
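	/* The low two bits of the GFX9+ swizzle mode select the micro-tile ordering. */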
4917 	MICRO_SWIZZLE_Z = 0,
4918 	MICRO_SWIZZLE_S = 1,
4919 	MICRO_SWIZZLE_D = 2,
4920 	MICRO_SWIZZLE_R = 3
4921 };
4922 
4923 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4924 					  uint32_t format,
4925 					  uint64_t modifier)
4926 {
4927 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4928 	const struct drm_format_info *info = drm_format_info(format);
4929 	int i;
4930 
4931 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4932 
4933 	if (!info)
4934 		return false;
4935 
4936 	/*
4937 	 * We always have to allow these modifiers:
4938 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4939 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4940 	 */
4941 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4942 	    modifier == DRM_FORMAT_MOD_INVALID) {
4943 		return true;
4944 	}
4945 
4946 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4947 	for (i = 0; i < plane->modifier_count; i++) {
4948 		if (modifier == plane->modifiers[i])
4949 			break;
4950 	}
4951 	if (i == plane->modifier_count)
4952 		return false;
4953 
4954 	/*
4955 	 * For D swizzle the canonical modifier depends on the bpp, so check
4956 	 * it here.
4957 	 */
4958 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4959 	    adev->family >= AMDGPU_FAMILY_NV) {
4960 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4961 			return false;
4962 	}
4963 
4964 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4965 	    info->cpp[0] < 8)
4966 		return false;
4967 
4968 	if (modifier_has_dcc(modifier)) {
4969 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4970 		if (info->cpp[0] != 4)
4971 			return false;
4972 		/* We support multi-planar formats, but not when combined with
4973 		 * additional DCC metadata planes. */
4974 		if (info->num_planes > 1)
4975 			return false;
4976 	}
4977 
4978 	return true;
4979 }
4980 
4981 static void
4982 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4983 {
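	/*
	 * Append mod to a dynamically grown array, doubling the capacity when
	 * full. A NULL *mods indicates an earlier allocation failure, so the
	 * entry is silently dropped.
	 */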
4984 	if (!*mods)
4985 		return;
4986 
4987 	if (*cap - *size < 1) {
4988 		uint64_t new_cap = *cap * 2;
4989 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4990 
4991 		if (!new_mods) {
4992 			kfree(*mods);
4993 			*mods = NULL;
4994 			return;
4995 		}
4996 
4997 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4998 		kfree(*mods);
4999 		*mods = new_mods;
5000 		*cap = new_cap;
5001 	}
5002 
5003 	(*mods)[*size] = mod;
5004 	*size += 1;
5005 }
5006 
5007 static void
5008 add_gfx9_modifiers(const struct amdgpu_device *adev,
5009 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5010 {
5011 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5012 	int pipe_xor_bits = min(8, pipes +
5013 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5014 	int bank_xor_bits = min(8 - pipe_xor_bits,
5015 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5016 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5017 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5018 
5019 
5020 	if (adev->family == AMDGPU_FAMILY_RV) {
5021 		/* Raven2 and later */
5022 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5023 
5024 		/*
5025 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5026 		 * doesn't support _D on DCN
5027 		 */
5028 
5029 		if (has_constant_encode) {
5030 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5031 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5032 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5033 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5034 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5035 				    AMD_FMT_MOD_SET(DCC, 1) |
5036 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5037 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5038 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5039 		}
5040 
5041 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5042 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5043 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5044 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5045 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5046 			    AMD_FMT_MOD_SET(DCC, 1) |
5047 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5048 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5049 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5050 
5051 		if (has_constant_encode) {
5052 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5053 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5054 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5055 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5056 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5057 				    AMD_FMT_MOD_SET(DCC, 1) |
5058 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5059 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5060 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5061 
5062 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5063 				    AMD_FMT_MOD_SET(RB, rb) |
5064 				    AMD_FMT_MOD_SET(PIPE, pipes));
5065 		}
5066 
5067 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5068 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5069 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5070 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5071 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5072 			    AMD_FMT_MOD_SET(DCC, 1) |
5073 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5074 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5075 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5076 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5077 			    AMD_FMT_MOD_SET(RB, rb) |
5078 			    AMD_FMT_MOD_SET(PIPE, pipes));
5079 	}
5080 
5081 	/*
5082 	 * Only supported for 64bpp on Raven, will be filtered on format in
5083 	 * dm_plane_format_mod_supported.
5084 	 */
5085 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5086 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5087 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5088 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5089 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5090 
5091 	if (adev->family == AMDGPU_FAMILY_RV) {
5092 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5093 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5094 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5095 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5096 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5097 	}
5098 
5099 	/*
5100 	 * Only supported for 64bpp on Raven, will be filtered on format in
5101 	 * dm_plane_format_mod_supported.
5102 	 */
5103 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5104 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5105 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5106 
5107 	if (adev->family == AMDGPU_FAMILY_RV) {
5108 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5109 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5110 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5111 	}
5112 }
5113 
5114 static void
5115 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5116 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5117 {
5118 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5119 
5120 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5121 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5122 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5123 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5124 		    AMD_FMT_MOD_SET(DCC, 1) |
5125 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5126 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5127 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5128 
5129 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5130 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5131 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5132 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5133 		    AMD_FMT_MOD_SET(DCC, 1) |
5134 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5135 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5136 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5137 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5138 
5139 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5140 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5141 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5142 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5143 
5144 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5145 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5146 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5147 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5148 
5150 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5151 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5152 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5153 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5154 
5155 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5156 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5157 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5158 }
5159 
5160 static void
5161 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5162 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5163 {
5164 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5165 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5166 
5167 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5168 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5169 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5170 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5171 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5172 		    AMD_FMT_MOD_SET(DCC, 1) |
5173 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5174 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5175 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5176 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5177 
5178 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5179 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5180 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5181 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5182 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5183 		    AMD_FMT_MOD_SET(DCC, 1) |
5184 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5185 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5186 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5187 
5188 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5189 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5190 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5191 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5192 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5193 		    AMD_FMT_MOD_SET(DCC, 1) |
5194 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5195 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5196 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5197 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5198 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5199 
5200 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5201 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5202 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5203 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5204 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5205 		    AMD_FMT_MOD_SET(DCC, 1) |
5206 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5207 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5208 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5209 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5210 
5211 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5212 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5213 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5214 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5215 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5216 
5217 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5218 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5219 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5220 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5221 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5222 
5223 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5224 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5225 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5226 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5227 
5228 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5229 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5230 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5231 }
5232 
5233 static void
5234 add_gfx11_modifiers(struct amdgpu_device *adev,
5235 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5236 {
5237 	int num_pipes = 0;
5238 	int pipe_xor_bits = 0;
5239 	int num_pkrs = 0;
5240 	int pkrs = 0;
5241 	u32 gb_addr_config;
5242 	unsigned swizzle_r_x;
5243 	uint64_t modifier_r_x;
5244 	uint64_t modifier_dcc_best;
5245 	uint64_t modifier_dcc_4k;
5246 
5247 	/* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
5248 	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
5249 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5250 	ASSERT(gb_addr_config != 0);
5251 
5252 	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5253 	pkrs = ilog2(num_pkrs);
5254 	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5255 	pipe_xor_bits = ilog2(num_pipes);
5256 
5257 	/* R_X swizzle modes are the best for rendering and DCC requires them. */
5258 	swizzle_r_x = num_pipes > 16 ? AMD_FMT_MOD_TILE_GFX11_256K_R_X :
5259 				       AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5260 
5261 	modifier_r_x = AMD_FMT_MOD |
5262 		AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5263 		AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5264 		AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5265 		AMD_FMT_MOD_SET(PACKERS, pkrs);
5266 
5267 	/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5268 	modifier_dcc_best = modifier_r_x |
5269 		AMD_FMT_MOD_SET(DCC, 1) |
5270 		AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5271 		AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5272 		AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5273 
5274 	/* DCC settings for 4K and greater resolutions. (required by display hw) */
5275 	modifier_dcc_4k = modifier_r_x |
5276 			AMD_FMT_MOD_SET(DCC, 1) |
5277 			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5278 			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5279 			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5280 
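	/*
	 * Best-compression DCC first, then the 4K-safe variant, their retiled
	 * forms, plain R_X, and finally the displayable D swizzle.
	 */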
5281 	add_modifier(mods, size, capacity, modifier_dcc_best);
5282 	add_modifier(mods, size, capacity, modifier_dcc_4k);
5283 
5284 	add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5285 	add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5286 
5287 	add_modifier(mods, size, capacity, modifier_r_x);
5288 
5289 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5290 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5291 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5292 }
5293 
5294 static int
5295 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5296 {
5297 	uint64_t size = 0, capacity = 128;
5298 	*mods = NULL;
5299 
5300 	/* We have not hooked up any pre-GFX9 modifiers. */
5301 	if (adev->family < AMDGPU_FAMILY_AI)
5302 		return 0;
5303 
5304 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
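	/*
	 * Allocation failure is only checked at the end; add_modifier() is
	 * assumed to tolerate a NULL modifier list and simply do nothing.
	 */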
5305 
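	/* Cursor planes only get the linear modifier (plus the INVALID terminator). */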
5306 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5307 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5308 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5309 		return *mods ? 0 : -ENOMEM;
5310 	}
5311 
5312 	switch (adev->family) {
5313 	case AMDGPU_FAMILY_AI:
5314 	case AMDGPU_FAMILY_RV:
5315 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5316 		break;
5317 	case AMDGPU_FAMILY_NV:
5318 	case AMDGPU_FAMILY_VGH:
5319 	case AMDGPU_FAMILY_YC:
5320 	case AMDGPU_FAMILY_GC_10_3_6:
5321 	case AMDGPU_FAMILY_GC_10_3_7:
5322 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5323 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5324 		else
5325 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5326 		break;
5327 	case AMDGPU_FAMILY_GC_11_0_0:
5328 		add_gfx11_modifiers(adev, mods, &size, &capacity);
5329 		break;
5330 	}
5331 
5332 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5333 
5334 	/* INVALID marks the end of the list. */
5335 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5336 
5337 	if (!*mods)
5338 		return -ENOMEM;
5339 
5340 	return 0;
5341 }
5342 
5343 static int
5344 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5345 					  const struct amdgpu_framebuffer *afb,
5346 					  const enum surface_pixel_format format,
5347 					  const enum dc_rotation_angle rotation,
5348 					  const struct plane_size *plane_size,
5349 					  union dc_tiling_info *tiling_info,
5350 					  struct dc_plane_dcc_param *dcc,
5351 					  struct dc_plane_address *address,
5352 					  const bool force_disable_dcc)
5353 {
5354 	const uint64_t modifier = afb->base.modifier;
5355 	int ret = 0;
5356 
5357 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5358 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5359 
5360 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5361 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5362 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5363 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5364 
5365 		dcc->enable = 1;
5366 		dcc->meta_pitch = afb->base.pitches[1];
5367 		dcc->independent_64b_blks = independent_64b_blks;
5368 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5369 			if (independent_64b_blks && independent_128b_blks)
5370 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5371 			else if (independent_128b_blks)
5372 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5373 			else if (independent_64b_blks && !independent_128b_blks)
5374 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5375 			else
5376 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5377 		} else {
5378 			if (independent_64b_blks)
5379 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5380 			else
5381 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5382 		}
5383 
5384 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5385 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5386 	}
5387 
5388 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5389 	if (ret)
5390 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5391 
5392 	return ret;
5393 }
5394 
5395 static int
5396 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5397 			     const struct amdgpu_framebuffer *afb,
5398 			     const enum surface_pixel_format format,
5399 			     const enum dc_rotation_angle rotation,
5400 			     const uint64_t tiling_flags,
5401 			     union dc_tiling_info *tiling_info,
5402 			     struct plane_size *plane_size,
5403 			     struct dc_plane_dcc_param *dcc,
5404 			     struct dc_plane_address *address,
5405 			     bool tmz_surface,
5406 			     bool force_disable_dcc)
5407 {
5408 	const struct drm_framebuffer *fb = &afb->base;
5409 	int ret;
5410 
5411 	memset(tiling_info, 0, sizeof(*tiling_info));
5412 	memset(plane_size, 0, sizeof(*plane_size));
5413 	memset(dcc, 0, sizeof(*dcc));
5414 	memset(address, 0, sizeof(*address));
5415 
5416 	address->tmz_surface = tmz_surface;
5417 
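	/*
	 * Single-plane (RGB) surfaces use one graphics address; planar YUV
	 * surfaces get separate luma/chroma addresses from planes 0 and 1.
	 */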
5418 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5419 		uint64_t addr = afb->address + fb->offsets[0];
5420 
5421 		plane_size->surface_size.x = 0;
5422 		plane_size->surface_size.y = 0;
5423 		plane_size->surface_size.width = fb->width;
5424 		plane_size->surface_size.height = fb->height;
5425 		plane_size->surface_pitch =
5426 			fb->pitches[0] / fb->format->cpp[0];
5427 
5428 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5429 		address->grph.addr.low_part = lower_32_bits(addr);
5430 		address->grph.addr.high_part = upper_32_bits(addr);
5431 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5432 		uint64_t luma_addr = afb->address + fb->offsets[0];
5433 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5434 
5435 		plane_size->surface_size.x = 0;
5436 		plane_size->surface_size.y = 0;
5437 		plane_size->surface_size.width = fb->width;
5438 		plane_size->surface_size.height = fb->height;
5439 		plane_size->surface_pitch =
5440 			fb->pitches[0] / fb->format->cpp[0];
5441 
5442 		plane_size->chroma_size.x = 0;
5443 		plane_size->chroma_size.y = 0;
5444 		/* TODO: set these based on surface format */
5445 		plane_size->chroma_size.width = fb->width / 2;
5446 		plane_size->chroma_size.height = fb->height / 2;
5447 
5448 		plane_size->chroma_pitch =
5449 			fb->pitches[1] / fb->format->cpp[1];
5450 
5451 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5452 		address->video_progressive.luma_addr.low_part =
5453 			lower_32_bits(luma_addr);
5454 		address->video_progressive.luma_addr.high_part =
5455 			upper_32_bits(luma_addr);
5456 		address->video_progressive.chroma_addr.low_part =
5457 			lower_32_bits(chroma_addr);
5458 		address->video_progressive.chroma_addr.high_part =
5459 			upper_32_bits(chroma_addr);
5460 	}
5461 
5462 	if (adev->family >= AMDGPU_FAMILY_AI) {
5463 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5464 								rotation, plane_size,
5465 								tiling_info, dcc,
5466 								address,
5467 								force_disable_dcc);
5468 		if (ret)
5469 			return ret;
5470 	} else {
5471 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5472 	}
5473 
5474 	return 0;
5475 }
5476 
5477 static void
5478 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5479 			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5480 			       bool *global_alpha, int *global_alpha_value)
5481 {
5482 	*per_pixel_alpha = false;
5483 	*pre_multiplied_alpha = true;
5484 	*global_alpha = false;
5485 	*global_alpha_value = 0xff;
5486 
5487 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5488 		return;
5489 
5490 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5491 		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5492 		static const uint32_t alpha_formats[] = {
5493 			DRM_FORMAT_ARGB8888,
5494 			DRM_FORMAT_RGBA8888,
5495 			DRM_FORMAT_ABGR8888,
5496 		};
5497 		uint32_t format = plane_state->fb->format->format;
5498 		unsigned int i;
5499 
5500 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5501 			if (format == alpha_formats[i]) {
5502 				*per_pixel_alpha = true;
5503 				break;
5504 			}
5505 		}
5506 
5507 		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5508 			*pre_multiplied_alpha = false;
5509 	}
5510 
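	/* DRM plane alpha is 16-bit; DC takes an 8-bit global alpha value. */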
5511 	if (plane_state->alpha < 0xffff) {
5512 		*global_alpha = true;
5513 		*global_alpha_value = plane_state->alpha >> 8;
5514 	}
5515 }
5516 
5517 static int
5518 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5519 			    const enum surface_pixel_format format,
5520 			    enum dc_color_space *color_space)
5521 {
5522 	bool full_range;
5523 
5524 	*color_space = COLOR_SPACE_SRGB;
5525 
5526 	/* DRM color properties only affect non-RGB formats. */
5527 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5528 		return 0;
5529 
5530 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5531 
5532 	switch (plane_state->color_encoding) {
5533 	case DRM_COLOR_YCBCR_BT601:
5534 		if (full_range)
5535 			*color_space = COLOR_SPACE_YCBCR601;
5536 		else
5537 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5538 		break;
5539 
5540 	case DRM_COLOR_YCBCR_BT709:
5541 		if (full_range)
5542 			*color_space = COLOR_SPACE_YCBCR709;
5543 		else
5544 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5545 		break;
5546 
5547 	case DRM_COLOR_YCBCR_BT2020:
5548 		if (full_range)
5549 			*color_space = COLOR_SPACE_2020_YCBCR;
5550 		else
5551 			return -EINVAL;
5552 		break;
5553 
5554 	default:
5555 		return -EINVAL;
5556 	}
5557 
5558 	return 0;
5559 }
5560 
5561 static int
5562 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5563 			    const struct drm_plane_state *plane_state,
5564 			    const uint64_t tiling_flags,
5565 			    struct dc_plane_info *plane_info,
5566 			    struct dc_plane_address *address,
5567 			    bool tmz_surface,
5568 			    bool force_disable_dcc)
5569 {
5570 	const struct drm_framebuffer *fb = plane_state->fb;
5571 	const struct amdgpu_framebuffer *afb =
5572 		to_amdgpu_framebuffer(plane_state->fb);
5573 	int ret;
5574 
5575 	memset(plane_info, 0, sizeof(*plane_info));
5576 
5577 	switch (fb->format->format) {
5578 	case DRM_FORMAT_C8:
5579 		plane_info->format =
5580 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5581 		break;
5582 	case DRM_FORMAT_RGB565:
5583 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5584 		break;
5585 	case DRM_FORMAT_XRGB8888:
5586 	case DRM_FORMAT_ARGB8888:
5587 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5588 		break;
5589 	case DRM_FORMAT_XRGB2101010:
5590 	case DRM_FORMAT_ARGB2101010:
5591 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5592 		break;
5593 	case DRM_FORMAT_XBGR2101010:
5594 	case DRM_FORMAT_ABGR2101010:
5595 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5596 		break;
5597 	case DRM_FORMAT_XBGR8888:
5598 	case DRM_FORMAT_ABGR8888:
5599 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5600 		break;
5601 	case DRM_FORMAT_NV21:
5602 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5603 		break;
5604 	case DRM_FORMAT_NV12:
5605 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5606 		break;
5607 	case DRM_FORMAT_P010:
5608 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5609 		break;
5610 	case DRM_FORMAT_XRGB16161616F:
5611 	case DRM_FORMAT_ARGB16161616F:
5612 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5613 		break;
5614 	case DRM_FORMAT_XBGR16161616F:
5615 	case DRM_FORMAT_ABGR16161616F:
5616 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5617 		break;
5618 	case DRM_FORMAT_XRGB16161616:
5619 	case DRM_FORMAT_ARGB16161616:
5620 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5621 		break;
5622 	case DRM_FORMAT_XBGR16161616:
5623 	case DRM_FORMAT_ABGR16161616:
5624 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5625 		break;
5626 	default:
5627 		DRM_ERROR(
5628 			"Unsupported screen format %p4cc\n",
5629 			&fb->format->format);
5630 		return -EINVAL;
5631 	}
5632 
5633 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5634 	case DRM_MODE_ROTATE_0:
5635 		plane_info->rotation = ROTATION_ANGLE_0;
5636 		break;
5637 	case DRM_MODE_ROTATE_90:
5638 		plane_info->rotation = ROTATION_ANGLE_90;
5639 		break;
5640 	case DRM_MODE_ROTATE_180:
5641 		plane_info->rotation = ROTATION_ANGLE_180;
5642 		break;
5643 	case DRM_MODE_ROTATE_270:
5644 		plane_info->rotation = ROTATION_ANGLE_270;
5645 		break;
5646 	default:
5647 		plane_info->rotation = ROTATION_ANGLE_0;
5648 		break;
5649 	}
5650 
5651 	plane_info->visible = true;
5652 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5653 
5654 	plane_info->layer_index = 0;
5655 
5656 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5657 					  &plane_info->color_space);
5658 	if (ret)
5659 		return ret;
5660 
5661 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5662 					   plane_info->rotation, tiling_flags,
5663 					   &plane_info->tiling_info,
5664 					   &plane_info->plane_size,
5665 					   &plane_info->dcc, address, tmz_surface,
5666 					   force_disable_dcc);
5667 	if (ret)
5668 		return ret;
5669 
5670 	fill_blending_from_plane_state(
5671 		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5672 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5673 
5674 	return 0;
5675 }
5676 
5677 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5678 				    struct dc_plane_state *dc_plane_state,
5679 				    struct drm_plane_state *plane_state,
5680 				    struct drm_crtc_state *crtc_state)
5681 {
5682 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5683 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5684 	struct dc_scaling_info scaling_info;
5685 	struct dc_plane_info plane_info;
5686 	int ret;
5687 	bool force_disable_dcc = false;
5688 
5689 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5690 	if (ret)
5691 		return ret;
5692 
5693 	dc_plane_state->src_rect = scaling_info.src_rect;
5694 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5695 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5696 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5697 
5698 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5699 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5700 					  afb->tiling_flags,
5701 					  &plane_info,
5702 					  &dc_plane_state->address,
5703 					  afb->tmz_surface,
5704 					  force_disable_dcc);
5705 	if (ret)
5706 		return ret;
5707 
5708 	dc_plane_state->format = plane_info.format;
5709 	dc_plane_state->color_space = plane_info.color_space;
5711 	dc_plane_state->plane_size = plane_info.plane_size;
5712 	dc_plane_state->rotation = plane_info.rotation;
5713 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5714 	dc_plane_state->stereo_format = plane_info.stereo_format;
5715 	dc_plane_state->tiling_info = plane_info.tiling_info;
5716 	dc_plane_state->visible = plane_info.visible;
5717 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5718 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5719 	dc_plane_state->global_alpha = plane_info.global_alpha;
5720 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5721 	dc_plane_state->dcc = plane_info.dcc;
5722 	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
5723 	dc_plane_state->flip_int_enabled = true;
5724 
5725 	/*
5726 	 * Always set input transfer function, since plane state is refreshed
5727 	 * every time.
5728 	 */
5729 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5730 	if (ret)
5731 		return ret;
5732 
5733 	return 0;
5734 }
5735 
5736 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5737 					   const struct dm_connector_state *dm_state,
5738 					   struct dc_stream_state *stream)
5739 {
5740 	enum amdgpu_rmx_type rmx_type;
5741 
5742 	struct rect src = { 0 }; /* viewport in composition space*/
5743 	struct rect dst = { 0 }; /* stream addressable area */
5744 
5745 	/* no mode. nothing to be done */
5746 	if (!mode)
5747 		return;
5748 
5749 	/* Full screen scaling by default */
5750 	src.width = mode->hdisplay;
5751 	src.height = mode->vdisplay;
5752 	dst.width = stream->timing.h_addressable;
5753 	dst.height = stream->timing.v_addressable;
5754 
5755 	if (dm_state) {
5756 		rmx_type = dm_state->scaling;
5757 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5758 			if (src.width * dst.height <
5759 					src.height * dst.width) {
5760 				/* height needs less upscaling/more downscaling */
5761 				dst.width = src.width *
5762 						dst.height / src.height;
5763 			} else {
5764 				/* width needs less upscaling/more downscaling */
5765 				dst.height = src.height *
5766 						dst.width / src.width;
5767 			}
5768 		} else if (rmx_type == RMX_CENTER) {
5769 			dst = src;
5770 		}
5771 
5772 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5773 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5774 
5775 		if (dm_state->underscan_enable) {
5776 			dst.x += dm_state->underscan_hborder / 2;
5777 			dst.y += dm_state->underscan_vborder / 2;
5778 			dst.width -= dm_state->underscan_hborder;
5779 			dst.height -= dm_state->underscan_vborder;
5780 		}
5781 	}
5782 
5783 	stream->src = src;
5784 	stream->dst = dst;
5785 
5786 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5787 		      dst.x, dst.y, dst.width, dst.height);
5788 
5789 }
5790 
5791 static enum dc_color_depth
5792 convert_color_depth_from_display_info(const struct drm_connector *connector,
5793 				      bool is_y420, int requested_bpc)
5794 {
5795 	uint8_t bpc;
5796 
5797 	if (is_y420) {
5798 		bpc = 8;
5799 
5800 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5801 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5802 			bpc = 16;
5803 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5804 			bpc = 12;
5805 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5806 			bpc = 10;
5807 	} else {
5808 		bpc = (uint8_t)connector->display_info.bpc;
5809 		/* Assume 8 bpc by default if no bpc is specified. */
5810 		bpc = bpc ? bpc : 8;
5811 	}
5812 
5813 	if (requested_bpc > 0) {
5814 		/*
5815 		 * Cap display bpc based on the user requested value.
5816 		 *
5817 		 * The value for state->max_bpc may not be correctly updated
5818 		 * depending on when the connector gets added to the state
5819 		 * or if this was called outside of atomic check, so it
5820 		 * can't be used directly.
5821 		 */
5822 		bpc = min_t(u8, bpc, requested_bpc);
5823 
5824 		/* Round down to the nearest even number. */
5825 		bpc = bpc - (bpc & 1);
5826 	}
5827 
5828 	switch (bpc) {
5829 	case 0:
5830 		/*
5831 		 * Temporary workaround: DRM doesn't parse color depth for
5832 		 * EDID revisions before 1.4.
5833 		 * TODO: Fix edid parsing
5834 		 */
5835 		return COLOR_DEPTH_888;
5836 	case 6:
5837 		return COLOR_DEPTH_666;
5838 	case 8:
5839 		return COLOR_DEPTH_888;
5840 	case 10:
5841 		return COLOR_DEPTH_101010;
5842 	case 12:
5843 		return COLOR_DEPTH_121212;
5844 	case 14:
5845 		return COLOR_DEPTH_141414;
5846 	case 16:
5847 		return COLOR_DEPTH_161616;
5848 	default:
5849 		return COLOR_DEPTH_UNDEFINED;
5850 	}
5851 }
5852 
5853 static enum dc_aspect_ratio
5854 get_aspect_ratio(const struct drm_display_mode *mode_in)
5855 {
5856 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5857 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5858 }
5859 
5860 static enum dc_color_space
5861 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5862 {
5863 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5864 
5865 	switch (dc_crtc_timing->pixel_encoding)	{
5866 	case PIXEL_ENCODING_YCBCR422:
5867 	case PIXEL_ENCODING_YCBCR444:
5868 	case PIXEL_ENCODING_YCBCR420:
5869 	{
5870 		/*
5871 		 * 27030 kHz is the separation point between HDTV and SDTV
5872 		 * according to the HDMI spec; use YCbCr709 above it and
5873 		 * YCbCr601 below it.
5874 		 */
5875 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5876 			if (dc_crtc_timing->flags.Y_ONLY)
5877 				color_space =
5878 					COLOR_SPACE_YCBCR709_LIMITED;
5879 			else
5880 				color_space = COLOR_SPACE_YCBCR709;
5881 		} else {
5882 			if (dc_crtc_timing->flags.Y_ONLY)
5883 				color_space =
5884 					COLOR_SPACE_YCBCR601_LIMITED;
5885 			else
5886 				color_space = COLOR_SPACE_YCBCR601;
5887 		}
5888 
5889 	}
5890 	break;
5891 	case PIXEL_ENCODING_RGB:
5892 		color_space = COLOR_SPACE_SRGB;
5893 		break;
5894 
5895 	default:
5896 		WARN_ON(1);
5897 		break;
5898 	}
5899 
5900 	return color_space;
5901 }
5902 
5903 static bool adjust_colour_depth_from_display_info(
5904 	struct dc_crtc_timing *timing_out,
5905 	const struct drm_display_info *info)
5906 {
5907 	enum dc_color_depth depth = timing_out->display_color_depth;
5908 	int normalized_clk;
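
	/*
	 * Step the colour depth down until the pixel clock, scaled for the
	 * chosen depth, fits within the sink's maximum TMDS clock.
	 */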
5909 	do {
5910 		normalized_clk = timing_out->pix_clk_100hz / 10;
5911 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5912 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5913 			normalized_clk /= 2;
5914 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5915 		switch (depth) {
5916 		case COLOR_DEPTH_888:
5917 			break;
5918 		case COLOR_DEPTH_101010:
5919 			normalized_clk = (normalized_clk * 30) / 24;
5920 			break;
5921 		case COLOR_DEPTH_121212:
5922 			normalized_clk = (normalized_clk * 36) / 24;
5923 			break;
5924 		case COLOR_DEPTH_161616:
5925 			normalized_clk = (normalized_clk * 48) / 24;
5926 			break;
5927 		default:
5928 			/* The above depths are the only ones valid for HDMI. */
5929 			return false;
5930 		}
5931 		if (normalized_clk <= info->max_tmds_clock) {
5932 			timing_out->display_color_depth = depth;
5933 			return true;
5934 		}
5935 	} while (--depth > COLOR_DEPTH_666);
5936 	return false;
5937 }
5938 
5939 static void fill_stream_properties_from_drm_display_mode(
5940 	struct dc_stream_state *stream,
5941 	const struct drm_display_mode *mode_in,
5942 	const struct drm_connector *connector,
5943 	const struct drm_connector_state *connector_state,
5944 	const struct dc_stream_state *old_stream,
5945 	int requested_bpc)
5946 {
5947 	struct dc_crtc_timing *timing_out = &stream->timing;
5948 	const struct drm_display_info *info = &connector->display_info;
5949 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5950 	struct hdmi_vendor_infoframe hv_frame;
5951 	struct hdmi_avi_infoframe avi_frame;
5952 
5953 	memset(&hv_frame, 0, sizeof(hv_frame));
5954 	memset(&avi_frame, 0, sizeof(avi_frame));
5955 
5956 	timing_out->h_border_left = 0;
5957 	timing_out->h_border_right = 0;
5958 	timing_out->v_border_top = 0;
5959 	timing_out->v_border_bottom = 0;
5960 	/* TODO: un-hardcode */
5961 	if (drm_mode_is_420_only(info, mode_in)
5962 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5963 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5964 	else if (drm_mode_is_420_also(info, mode_in)
5965 			&& aconnector->force_yuv420_output)
5966 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5967 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5968 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5969 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5970 	else
5971 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5972 
5973 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5974 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5975 		connector,
5976 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5977 		requested_bpc);
5978 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5979 	timing_out->hdmi_vic = 0;
5980 
5981 	if (old_stream) {
5982 		timing_out->vic = old_stream->timing.vic;
5983 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5984 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5985 	} else {
5986 		timing_out->vic = drm_match_cea_mode(mode_in);
5987 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5988 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5989 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5990 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5991 	}
5992 
5993 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5994 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5995 		timing_out->vic = avi_frame.video_code;
5996 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5997 		timing_out->hdmi_vic = hv_frame.vic;
5998 	}
5999 
6000 	if (is_freesync_video_mode(mode_in, aconnector)) {
6001 		timing_out->h_addressable = mode_in->hdisplay;
6002 		timing_out->h_total = mode_in->htotal;
6003 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6004 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6005 		timing_out->v_total = mode_in->vtotal;
6006 		timing_out->v_addressable = mode_in->vdisplay;
6007 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6008 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6009 		timing_out->pix_clk_100hz = mode_in->clock * 10;
6010 	} else {
6011 		timing_out->h_addressable = mode_in->crtc_hdisplay;
6012 		timing_out->h_total = mode_in->crtc_htotal;
6013 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6014 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6015 		timing_out->v_total = mode_in->crtc_vtotal;
6016 		timing_out->v_addressable = mode_in->crtc_vdisplay;
6017 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6018 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6019 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6020 	}
6021 
6022 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6023 
6024 	stream->output_color_space = get_output_color_space(timing_out);
6025 
6026 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6027 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6028 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6029 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6030 		    drm_mode_is_420_also(info, mode_in) &&
6031 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6032 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6033 			adjust_colour_depth_from_display_info(timing_out, info);
6034 		}
6035 	}
6036 }
6037 
6038 static void fill_audio_info(struct audio_info *audio_info,
6039 			    const struct drm_connector *drm_connector,
6040 			    const struct dc_sink *dc_sink)
6041 {
6042 	int i = 0;
6043 	int cea_revision = 0;
6044 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6045 
6046 	audio_info->manufacture_id = edid_caps->manufacturer_id;
6047 	audio_info->product_id = edid_caps->product_id;
6048 
6049 	cea_revision = drm_connector->display_info.cea_rev;
6050 
6051 	strscpy(audio_info->display_name,
6052 		edid_caps->display_name,
6053 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6054 
6055 	if (cea_revision >= 3) {
6056 		audio_info->mode_count = edid_caps->audio_mode_count;
6057 
6058 		for (i = 0; i < audio_info->mode_count; ++i) {
6059 			audio_info->modes[i].format_code =
6060 					(enum audio_format_code)
6061 					(edid_caps->audio_modes[i].format_code);
6062 			audio_info->modes[i].channel_count =
6063 					edid_caps->audio_modes[i].channel_count;
6064 			audio_info->modes[i].sample_rates.all =
6065 					edid_caps->audio_modes[i].sample_rate;
6066 			audio_info->modes[i].sample_size =
6067 					edid_caps->audio_modes[i].sample_size;
6068 		}
6069 	}
6070 
6071 	audio_info->flags.all = edid_caps->speaker_flags;
6072 
6073 	/* TODO: We only check for the progressive mode, check for interlace mode too */
6074 	if (drm_connector->latency_present[0]) {
6075 		audio_info->video_latency = drm_connector->video_latency[0];
6076 		audio_info->audio_latency = drm_connector->audio_latency[0];
6077 	}
6078 
6079 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6080 
6081 }
6082 
6083 static void
6084 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6085 				      struct drm_display_mode *dst_mode)
6086 {
6087 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6088 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6089 	dst_mode->crtc_clock = src_mode->crtc_clock;
6090 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6091 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6092 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6093 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6094 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6095 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6096 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6097 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6098 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6099 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6100 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6101 }
6102 
6103 static void
6104 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6105 					const struct drm_display_mode *native_mode,
6106 					bool scale_enabled)
6107 {
6108 	if (scale_enabled) {
6109 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6110 	} else if (native_mode->clock == drm_mode->clock &&
6111 			native_mode->htotal == drm_mode->htotal &&
6112 			native_mode->vtotal == drm_mode->vtotal) {
6113 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6114 	} else {
6115 		/* no scaling and not an amdgpu-inserted mode, nothing to patch */
6116 	}
6117 }
6118 
6119 static struct dc_sink *
6120 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6121 {
6122 	struct dc_sink_init_data sink_init_data = { 0 };
6123 	struct dc_sink *sink = NULL;
6124 	sink_init_data.link = aconnector->dc_link;
6125 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6126 
6127 	sink = dc_sink_create(&sink_init_data);
6128 	if (!sink) {
6129 		DRM_ERROR("Failed to create sink!\n");
6130 		return NULL;
6131 	}
6132 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6133 
6134 	return sink;
6135 }
6136 
6137 static void set_multisync_trigger_params(
6138 		struct dc_stream_state *stream)
6139 {
6140 	struct dc_stream_state *master = NULL;
6141 
6142 	if (stream->triggered_crtc_reset.enabled) {
6143 		master = stream->triggered_crtc_reset.event_source;
6144 		stream->triggered_crtc_reset.event =
6145 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6146 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6147 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6148 	}
6149 }
6150 
6151 static void set_master_stream(struct dc_stream_state *stream_set[],
6152 			      int stream_count)
6153 {
6154 	int j, highest_rfr = 0, master_stream = 0;
6155 
6156 	for (j = 0;  j < stream_count; j++) {
6157 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6158 			int refresh_rate = 0;
6159 
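			/*
			 * pix_clk_100hz is in units of 100 Hz, so this is
			 * refresh rate = pixel clock / (htotal * vtotal).
			 */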
6160 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6161 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6162 			if (refresh_rate > highest_rfr) {
6163 				highest_rfr = refresh_rate;
6164 				master_stream = j;
6165 			}
6166 		}
6167 	}
6168 	for (j = 0;  j < stream_count; j++) {
6169 		if (stream_set[j])
6170 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6171 	}
6172 }
6173 
6174 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6175 {
6176 	int i = 0;
6177 	struct dc_stream_state *stream;
6178 
6179 	if (context->stream_count < 2)
6180 		return;
6181 	for (i = 0; i < context->stream_count ; i++) {
6182 		if (!context->streams[i])
6183 			continue;
6184 		/*
6185 		 * TODO: add a function to read AMD VSDB bits and set
6186 		 * crtc_sync_master.multi_sync_enabled flag
6187 		 * For now it's set to false
6188 		 */
6189 	}
6190 
6191 	set_master_stream(context->streams, context->stream_count);
6192 
6193 	for (i = 0; i < context->stream_count ; i++) {
6194 		stream = context->streams[i];
6195 
6196 		if (!stream)
6197 			continue;
6198 
6199 		set_multisync_trigger_params(stream);
6200 	}
6201 }
6202 
6203 #if defined(CONFIG_DRM_AMD_DC_DCN)
6204 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6205 							struct dc_sink *sink, struct dc_stream_state *stream,
6206 							struct dsc_dec_dpcd_caps *dsc_caps)
6207 {
6208 	stream->timing.flags.DSC = 0;
6209 	dsc_caps->is_dsc_supported = false;
6210 
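	/*
	 * DSC DPCD caps are only parsed for DP/eDP sinks driven natively or
	 * through a DP-to-HDMI converter dongle.
	 */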
6211 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6212 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6213 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6214 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6215 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6216 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6217 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6218 				dsc_caps);
6219 	}
6220 }
6221 
6222 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6223 				    struct dc_sink *sink, struct dc_stream_state *stream,
6224 				    struct dsc_dec_dpcd_caps *dsc_caps,
6225 				    uint32_t max_dsc_target_bpp_limit_override)
6226 {
6227 	const struct dc_link_settings *verified_link_cap = NULL;
6228 	uint32_t link_bw_in_kbps;
6229 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6230 	struct dc *dc = sink->ctx->dc;
6231 	struct dc_dsc_bw_range bw_range = {0};
6232 	struct dc_dsc_config dsc_cfg = {0};
6233 
6234 	verified_link_cap = dc_link_get_link_cap(stream->link);
6235 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
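	/* bpp_x16 values are in 1/16 bpp units, so 8 * 16 is 8.0 bpp. */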
6236 	edp_min_bpp_x16 = 8 * 16;
6237 	edp_max_bpp_x16 = 8 * 16;
6238 
6239 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6240 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6241 
6242 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6243 		edp_min_bpp_x16 = edp_max_bpp_x16;
6244 
6245 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6246 				dc->debug.dsc_min_slice_height_override,
6247 				edp_min_bpp_x16, edp_max_bpp_x16,
6248 				dsc_caps,
6249 				&stream->timing,
6250 				&bw_range)) {
6251 
6252 		if (bw_range.max_kbps < link_bw_in_kbps) {
6253 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6254 					dsc_caps,
6255 					dc->debug.dsc_min_slice_height_override,
6256 					max_dsc_target_bpp_limit_override,
6257 					0,
6258 					&stream->timing,
6259 					&dsc_cfg)) {
6260 				stream->timing.dsc_cfg = dsc_cfg;
6261 				stream->timing.flags.DSC = 1;
6262 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6263 			}
6264 			return;
6265 		}
6266 	}
6267 
6268 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6269 				dsc_caps,
6270 				dc->debug.dsc_min_slice_height_override,
6271 				max_dsc_target_bpp_limit_override,
6272 				link_bw_in_kbps,
6273 				&stream->timing,
6274 				&dsc_cfg)) {
6275 		stream->timing.dsc_cfg = dsc_cfg;
6276 		stream->timing.flags.DSC = 1;
6277 	}
6278 }
6279 
6280 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6281 										struct dc_sink *sink, struct dc_stream_state *stream,
6282 										struct dsc_dec_dpcd_caps *dsc_caps)
6283 {
6284 	struct drm_connector *drm_connector = &aconnector->base;
6285 	uint32_t link_bandwidth_kbps;
6286 	uint32_t max_dsc_target_bpp_limit_override = 0;
6287 	struct dc *dc = sink->ctx->dc;
6288 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6289 	uint32_t dsc_max_supported_bw_in_kbps;
6290 
6291 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6292 							dc_link_get_link_cap(aconnector->dc_link));
6293 
6294 	if (stream->link && stream->link->local_sink)
6295 		max_dsc_target_bpp_limit_override =
6296 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6297 
6298 	/* Set DSC policy according to dsc_clock_en */
6299 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6300 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6301 
6302 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6303 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6304 
6305 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6306 
6307 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6308 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6309 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6310 						dsc_caps,
6311 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6312 						max_dsc_target_bpp_limit_override,
6313 						link_bandwidth_kbps,
6314 						&stream->timing,
6315 						&stream->timing.dsc_cfg)) {
6316 				stream->timing.flags.DSC = 1;
6317 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6318 								 __func__, drm_connector->name);
6319 			}
6320 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6321 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6322 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6323 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6324 
6325 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6326 					max_supported_bw_in_kbps > 0 &&
6327 					dsc_max_supported_bw_in_kbps > 0)
6328 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6329 						dsc_caps,
6330 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6331 						max_dsc_target_bpp_limit_override,
6332 						dsc_max_supported_bw_in_kbps,
6333 						&stream->timing,
6334 						&stream->timing.dsc_cfg)) {
6335 					stream->timing.flags.DSC = 1;
6336 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6337 									 __func__, drm_connector->name);
6338 				}
6339 		}
6340 	}
6341 
6342 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6343 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6344 		stream->timing.flags.DSC = 1;
6345 
6346 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6347 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6348 
6349 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6350 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6351 
6352 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6353 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6354 }
6355 #endif /* CONFIG_DRM_AMD_DC_DCN */
6356 
6357 /**
6358  * DOC: FreeSync Video
6359  *
6360  * When a userspace application wants to play a video, the content follows a
6361  * standard format definition that usually specifies the FPS for that format.
6362  * The list below illustrates some video formats and their expected FPS,
6363  * respectively:
6364  *
6365  * - TV/NTSC (23.976 FPS)
6366  * - Cinema (24 FPS)
6367  * - TV/PAL (25 FPS)
6368  * - TV/NTSC (29.97 FPS)
6369  * - TV/NTSC (30 FPS)
6370  * - Cinema HFR (48 FPS)
6371  * - TV/PAL (50 FPS)
6372  * - Commonly used (60 FPS)
6373  * - Multiples of 24 (48,72,96,120 FPS)
6374  *
6375  * The list of standard video formats is not huge and can be added to the
6376  * connector modeset list beforehand. With that, userspace can leverage
6377  * FreeSync to extend the front porch in order to attain the target refresh
6378  * rate. Such a switch will happen seamlessly, without screen blanking or
6379  * reprogramming of the output in any other way. If userspace requests a
6380  * modesetting change compatible with FreeSync modes that only differ in the
6381  * refresh rate, DC will skip the full update and avoid blinking during the
6382  * transition. For example, the video player can change the modesetting from
6383  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6384  * causing any display blink. This same concept can be applied to a mode
6385  * setting change.
6386  */
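/*
 * Worked example of the front porch adjustment described above (illustration
 * only, values not taken from the code below):
 * refresh rate = pixel clock / (htotal * vtotal). A 1920x1080 mode with a
 * 148.5 MHz clock and htotal = 2200, vtotal = 1125 refreshes at 60 Hz;
 * stretching only the vertical front porch so that vtotal becomes ~1406
 * yields ~48 Hz from the same pixel clock, which is how a FreeSync video
 * mode retargets the refresh rate without a full modeset.
 */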
6387 static struct drm_display_mode *
6388 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6389 			  bool use_probed_modes)
6390 {
6391 	struct drm_display_mode *m, *m_pref = NULL;
6392 	u16 current_refresh, highest_refresh;
6393 	struct list_head *list_head = use_probed_modes ?
6394 						    &aconnector->base.probed_modes :
6395 						    &aconnector->base.modes;
6396 
6397 	if (aconnector->freesync_vid_base.clock != 0)
6398 		return &aconnector->freesync_vid_base;
6399 
6400 	/* Find the preferred mode */
6401 	list_for_each_entry (m, list_head, head) {
6402 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6403 			m_pref = m;
6404 			break;
6405 		}
6406 	}
6407 
6408 	if (!m_pref) {
6409 		/* Probably an EDID with no preferred mode. Fall back to the first entry */
6410 		m_pref = list_first_entry_or_null(
6411 			&aconnector->base.modes, struct drm_display_mode, head);
6412 		if (!m_pref) {
6413 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6414 			return NULL;
6415 		}
6416 	}
6417 
6418 	highest_refresh = drm_mode_vrefresh(m_pref);
6419 
6420 	/*
6421 	 * Find the mode with highest refresh rate with same resolution.
6422 	 * For some monitors, preferred mode is not the mode with highest
6423 	 * supported refresh rate.
6424 	 */
6425 	list_for_each_entry (m, list_head, head) {
6426 		current_refresh  = drm_mode_vrefresh(m);
6427 
6428 		if (m->hdisplay == m_pref->hdisplay &&
6429 		    m->vdisplay == m_pref->vdisplay &&
6430 		    highest_refresh < current_refresh) {
6431 			highest_refresh = current_refresh;
6432 			m_pref = m;
6433 		}
6434 	}
6435 
6436 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6437 	return m_pref;
6438 }
6439 
6440 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6441 				   struct amdgpu_dm_connector *aconnector)
6442 {
6443 	struct drm_display_mode *high_mode;
6444 	int timing_diff;
6445 
6446 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6447 	if (!high_mode || !mode)
6448 		return false;
6449 
6450 	timing_diff = high_mode->vtotal - mode->vtotal;
6451 
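	/*
	 * A FreeSync video mode must match the base mode in everything except
	 * an extended vertical front porch, so every vertical sync offset has
	 * to shift by the same vtotal delta.
	 */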
6452 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6453 	    high_mode->hdisplay != mode->hdisplay ||
6454 	    high_mode->vdisplay != mode->vdisplay ||
6455 	    high_mode->hsync_start != mode->hsync_start ||
6456 	    high_mode->hsync_end != mode->hsync_end ||
6457 	    high_mode->htotal != mode->htotal ||
6458 	    high_mode->hskew != mode->hskew ||
6459 	    high_mode->vscan != mode->vscan ||
6460 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6461 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6462 		return false;
6463 	else
6464 		return true;
6465 }
6466 
6467 static struct dc_stream_state *
6468 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6469 		       const struct drm_display_mode *drm_mode,
6470 		       const struct dm_connector_state *dm_state,
6471 		       const struct dc_stream_state *old_stream,
6472 		       int requested_bpc)
6473 {
6474 	struct drm_display_mode *preferred_mode = NULL;
6475 	struct drm_connector *drm_connector;
6476 	const struct drm_connector_state *con_state =
6477 		dm_state ? &dm_state->base : NULL;
6478 	struct dc_stream_state *stream = NULL;
6479 	struct drm_display_mode mode = *drm_mode;
6480 	struct drm_display_mode saved_mode;
6481 	struct drm_display_mode *freesync_mode = NULL;
6482 	bool native_mode_found = false;
6483 	bool recalculate_timing = false;
6484 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6485 	int mode_refresh;
6486 	int preferred_refresh = 0;
6487 #if defined(CONFIG_DRM_AMD_DC_DCN)
6488 	struct dsc_dec_dpcd_caps dsc_caps;
6489 #endif
6490 	struct dc_sink *sink = NULL;
6491 
6492 	memset(&saved_mode, 0, sizeof(saved_mode));
6493 
6494 	if (aconnector == NULL) {
6495 		DRM_ERROR("aconnector is NULL!\n");
6496 		return stream;
6497 	}
6498 
6499 	drm_connector = &aconnector->base;
6500 
6501 	if (!aconnector->dc_sink) {
6502 		sink = create_fake_sink(aconnector);
6503 		if (!sink)
6504 			return stream;
6505 	} else {
6506 		sink = aconnector->dc_sink;
6507 		dc_sink_retain(sink);
6508 	}
6509 
6510 	stream = dc_create_stream_for_sink(sink);
6511 
6512 	if (stream == NULL) {
6513 		DRM_ERROR("Failed to create stream for sink!\n");
6514 		goto finish;
6515 	}
6516 
6517 	stream->dm_stream_context = aconnector;
6518 
6519 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6520 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6521 
6522 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6523 		/* Search for preferred mode */
6524 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6525 			native_mode_found = true;
6526 			break;
6527 		}
6528 	}
6529 	if (!native_mode_found)
6530 		preferred_mode = list_first_entry_or_null(
6531 				&aconnector->base.modes,
6532 				struct drm_display_mode,
6533 				head);
6534 
6535 	mode_refresh = drm_mode_vrefresh(&mode);
6536 
6537 	if (preferred_mode == NULL) {
6538 		/*
6539 		 * This may not be an error: the use case is when we have no
6540 		 * usermode calls to reset and set mode upon hotplug. In this
6541 		 * case, we call set mode ourselves to restore the previous mode
6542 		 * and the mode list may not be filled in yet.
6543 		 */
6544 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6545 	} else {
6546 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6547 		if (recalculate_timing) {
6548 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6549 			drm_mode_copy(&saved_mode, &mode);
6550 			drm_mode_copy(&mode, freesync_mode);
6551 		} else {
6552 			decide_crtc_timing_for_drm_display_mode(
6553 				&mode, preferred_mode, scale);
6554 
6555 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6556 		}
6557 	}
6558 
6559 	if (recalculate_timing)
6560 		drm_mode_set_crtcinfo(&saved_mode, 0);
6561 	else if (!dm_state)
6562 		drm_mode_set_crtcinfo(&mode, 0);
6563 
6564 	/*
6565 	 * If scaling is enabled and the refresh rate didn't change,
6566 	 * we copy the vic and polarities of the old timings.
6567 	 */
6568 	if (!scale || mode_refresh != preferred_refresh)
6569 		fill_stream_properties_from_drm_display_mode(
6570 			stream, &mode, &aconnector->base, con_state, NULL,
6571 			requested_bpc);
6572 	else
6573 		fill_stream_properties_from_drm_display_mode(
6574 			stream, &mode, &aconnector->base, con_state, old_stream,
6575 			requested_bpc);
6576 
6577 #if defined(CONFIG_DRM_AMD_DC_DCN)
6578 	/* SST DSC determination policy */
6579 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6580 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6581 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6582 #endif
6583 
6584 	update_stream_scaling_settings(&mode, dm_state, stream);
6585 
6586 	fill_audio_info(
6587 		&stream->audio_info,
6588 		drm_connector,
6589 		sink);
6590 
6591 	update_stream_signal(stream, sink);
6592 
6593 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6594 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6595 
6596 	if (stream->link->psr_settings.psr_feature_enabled) {
6597 		//
6598 		// should decide stream support vsc sdp colorimetry capability
6599 		// before building vsc info packet
6600 		//
6601 		stream->use_vsc_sdp_for_colorimetry = false;
6602 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6603 			stream->use_vsc_sdp_for_colorimetry =
6604 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6605 		} else {
6606 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6607 				stream->use_vsc_sdp_for_colorimetry = true;
6608 		}
6609 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6610 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6611 
6612 	}
6613 finish:
6614 	dc_sink_release(sink);
6615 
6616 	return stream;
6617 }
6618 
6619 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6620 {
6621 	drm_crtc_cleanup(crtc);
6622 	kfree(crtc);
6623 }
6624 
6625 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6626 				  struct drm_crtc_state *state)
6627 {
6628 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6629 
	/* TODO: Destroy dc_stream objects after the stream object is flattened */
6631 	if (cur->stream)
6632 		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

6638 	kfree(state);
6639 }
6640 
6641 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6642 {
6643 	struct dm_crtc_state *state;
6644 
6645 	if (crtc->state)
6646 		dm_crtc_destroy_state(crtc, crtc->state);
6647 
6648 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6649 	if (WARN_ON(!state))
6650 		return;
6651 
6652 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6653 }
6654 
6655 static struct drm_crtc_state *
6656 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6657 {
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
6664 
6665 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6666 	if (!state)
6667 		return NULL;
6668 
6669 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6670 
6671 	if (cur->stream) {
6672 		state->stream = cur->stream;
6673 		dc_stream_retain(state->stream);
6674 	}
6675 
6676 	state->active_planes = cur->active_planes;
6677 	state->vrr_infopacket = cur->vrr_infopacket;
6678 	state->abm_level = cur->abm_level;
6679 	state->vrr_supported = cur->vrr_supported;
6680 	state->freesync_config = cur->freesync_config;
6681 	state->cm_has_degamma = cur->cm_has_degamma;
6682 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6683 	state->force_dpms_off = cur->force_dpms_off;
	/* TODO: Duplicate dc_stream after the stream object is flattened */
6685 
6686 	return &state->base;
6687 }
6688 
6689 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6690 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6691 {
6692 	crtc_debugfs_init(crtc);
6693 
6694 	return 0;
6695 }
6696 #endif
6697 
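/*
 * Enable or disable the VUPDATE interrupt for the OTG instance backing this
 * CRTC. Returns 0 on success or -EBUSY if DC rejects the request.
 */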
6698 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6699 {
6700 	enum dc_irq_source irq_source;
6701 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6702 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6703 	int rc;
6704 
6705 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6706 
6707 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6708 
6709 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6710 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6711 	return rc;
6712 }
6713 
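/*
 * Enable or disable the VBLANK interrupt for a CRTC. The VUPDATE interrupt
 * only needs to stay on alongside VBLANK while VRR is active. The remaining
 * handling is deferred to vblank_control_worker on the vblank control
 * workqueue, except while the device is in reset.
 */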
6714 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6715 {
6716 	enum dc_irq_source irq_source;
6717 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6718 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6719 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6720 	struct amdgpu_display_manager *dm = &adev->dm;
6721 	struct vblank_control_work *work;
6722 	int rc = 0;
6723 
6724 	if (enable) {
6725 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6726 		if (amdgpu_dm_vrr_active(acrtc_state))
6727 			rc = dm_set_vupdate_irq(crtc, true);
6728 	} else {
6729 		/* vblank irq off -> vupdate irq off */
6730 		rc = dm_set_vupdate_irq(crtc, false);
6731 	}
6732 
6733 	if (rc)
6734 		return rc;
6735 
6736 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6737 
6738 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6739 		return -EBUSY;
6740 
6741 	if (amdgpu_in_reset(adev))
6742 		return 0;
6743 
6744 	if (dm->vblank_control_workqueue) {
6745 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6746 		if (!work)
6747 			return -ENOMEM;
6748 
6749 		INIT_WORK(&work->work, vblank_control_worker);
6750 		work->dm = dm;
6751 		work->acrtc = acrtc;
6752 		work->enable = enable;
6753 
6754 		if (acrtc_state->stream) {
6755 			dc_stream_retain(acrtc_state->stream);
6756 			work->stream = acrtc_state->stream;
6757 		}
6758 
6759 		queue_work(dm->vblank_control_workqueue, &work->work);
6760 	}
6761 
6762 	return 0;
6763 }
6764 
6765 static int dm_enable_vblank(struct drm_crtc *crtc)
6766 {
6767 	return dm_set_vblank(crtc, true);
6768 }
6769 
6770 static void dm_disable_vblank(struct drm_crtc *crtc)
6771 {
6772 	dm_set_vblank(crtc, false);
6773 }
6774 
/* Only the options currently available to the driver are implemented */
6776 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6777 	.reset = dm_crtc_reset_state,
6778 	.destroy = amdgpu_dm_crtc_destroy,
6779 	.set_config = drm_atomic_helper_set_config,
6780 	.page_flip = drm_atomic_helper_page_flip,
6781 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6782 	.atomic_destroy_state = dm_crtc_destroy_state,
6783 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6784 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6785 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6786 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6787 	.enable_vblank = dm_enable_vblank,
6788 	.disable_vblank = dm_disable_vblank,
6789 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6790 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6791 	.late_register = amdgpu_dm_crtc_late_register,
6792 #endif
6793 };
6794 
6795 static enum drm_connector_status
6796 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6797 {
6798 	bool connected;
6799 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6800 
6801 	/*
6802 	 * Notes:
6803 	 * 1. This interface is NOT called in context of HPD irq.
6804 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6805 	 * makes it a bad place for *any* MST-related activity.
6806 	 */
6807 
6808 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6809 	    !aconnector->fake_enable)
6810 		connected = (aconnector->dc_sink != NULL);
6811 	else
6812 		connected = (aconnector->base.force == DRM_FORCE_ON);
6813 
6814 	update_subconnector_property(aconnector);
6815 
6816 	return (connected ? connector_status_connected :
6817 			connector_status_disconnected);
6818 }
6819 
6820 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6821 					    struct drm_connector_state *connector_state,
6822 					    struct drm_property *property,
6823 					    uint64_t val)
6824 {
6825 	struct drm_device *dev = connector->dev;
6826 	struct amdgpu_device *adev = drm_to_adev(dev);
6827 	struct dm_connector_state *dm_old_state =
6828 		to_dm_connector_state(connector->state);
6829 	struct dm_connector_state *dm_new_state =
6830 		to_dm_connector_state(connector_state);
6831 
6832 	int ret = -EINVAL;
6833 
6834 	if (property == dev->mode_config.scaling_mode_property) {
6835 		enum amdgpu_rmx_type rmx_type;
6836 
6837 		switch (val) {
6838 		case DRM_MODE_SCALE_CENTER:
6839 			rmx_type = RMX_CENTER;
6840 			break;
6841 		case DRM_MODE_SCALE_ASPECT:
6842 			rmx_type = RMX_ASPECT;
6843 			break;
6844 		case DRM_MODE_SCALE_FULLSCREEN:
6845 			rmx_type = RMX_FULL;
6846 			break;
6847 		case DRM_MODE_SCALE_NONE:
6848 		default:
6849 			rmx_type = RMX_OFF;
6850 			break;
6851 		}
6852 
6853 		if (dm_old_state->scaling == rmx_type)
6854 			return 0;
6855 
6856 		dm_new_state->scaling = rmx_type;
6857 		ret = 0;
6858 	} else if (property == adev->mode_info.underscan_hborder_property) {
6859 		dm_new_state->underscan_hborder = val;
6860 		ret = 0;
6861 	} else if (property == adev->mode_info.underscan_vborder_property) {
6862 		dm_new_state->underscan_vborder = val;
6863 		ret = 0;
6864 	} else if (property == adev->mode_info.underscan_property) {
6865 		dm_new_state->underscan_enable = val;
6866 		ret = 0;
6867 	} else if (property == adev->mode_info.abm_level_property) {
6868 		dm_new_state->abm_level = val;
6869 		ret = 0;
6870 	}
6871 
6872 	return ret;
6873 }
6874 
6875 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6876 					    const struct drm_connector_state *state,
6877 					    struct drm_property *property,
6878 					    uint64_t *val)
6879 {
6880 	struct drm_device *dev = connector->dev;
6881 	struct amdgpu_device *adev = drm_to_adev(dev);
6882 	struct dm_connector_state *dm_state =
6883 		to_dm_connector_state(state);
6884 	int ret = -EINVAL;
6885 
6886 	if (property == dev->mode_config.scaling_mode_property) {
6887 		switch (dm_state->scaling) {
6888 		case RMX_CENTER:
6889 			*val = DRM_MODE_SCALE_CENTER;
6890 			break;
6891 		case RMX_ASPECT:
6892 			*val = DRM_MODE_SCALE_ASPECT;
6893 			break;
6894 		case RMX_FULL:
6895 			*val = DRM_MODE_SCALE_FULLSCREEN;
6896 			break;
6897 		case RMX_OFF:
6898 		default:
6899 			*val = DRM_MODE_SCALE_NONE;
6900 			break;
6901 		}
6902 		ret = 0;
6903 	} else if (property == adev->mode_info.underscan_hborder_property) {
6904 		*val = dm_state->underscan_hborder;
6905 		ret = 0;
6906 	} else if (property == adev->mode_info.underscan_vborder_property) {
6907 		*val = dm_state->underscan_vborder;
6908 		ret = 0;
6909 	} else if (property == adev->mode_info.underscan_property) {
6910 		*val = dm_state->underscan_enable;
6911 		ret = 0;
6912 	} else if (property == adev->mode_info.abm_level_property) {
6913 		*val = dm_state->abm_level;
6914 		ret = 0;
6915 	}
6916 
6917 	return ret;
6918 }
6919 
6920 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6921 {
6922 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6923 
6924 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6925 }
6926 
6927 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6928 {
6929 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6930 	const struct dc_link *link = aconnector->dc_link;
6931 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6932 	struct amdgpu_display_manager *dm = &adev->dm;
6933 	int i;
6934 
6935 	/*
6936 	 * Call only if mst_mgr was iniitalized before since it's not done
6937 	 * for all connector types.
6938 	 */
6939 	if (aconnector->mst_mgr.dev)
6940 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6941 
6942 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6943 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6944 	for (i = 0; i < dm->num_of_edps; i++) {
6945 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6946 			backlight_device_unregister(dm->backlight_dev[i]);
6947 			dm->backlight_dev[i] = NULL;
6948 		}
6949 	}
6950 #endif
6951 
6952 	if (aconnector->dc_em_sink)
6953 		dc_sink_release(aconnector->dc_em_sink);
6954 	aconnector->dc_em_sink = NULL;
6955 	if (aconnector->dc_sink)
6956 		dc_sink_release(aconnector->dc_sink);
6957 	aconnector->dc_sink = NULL;
6958 
6959 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6960 	drm_connector_unregister(connector);
6961 	drm_connector_cleanup(connector);
6962 	if (aconnector->i2c) {
6963 		i2c_del_adapter(&aconnector->i2c->base);
6964 		kfree(aconnector->i2c);
6965 	}
6966 	kfree(aconnector->dm_dp_aux.aux.name);
6967 
6968 	kfree(connector);
6969 }
6970 
6971 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6972 {
6973 	struct dm_connector_state *state =
6974 		to_dm_connector_state(connector->state);
6975 
6976 	if (connector->state)
6977 		__drm_atomic_helper_connector_destroy_state(connector->state);
6978 
6979 	kfree(state);
6980 
6981 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6982 
6983 	if (state) {
6984 		state->scaling = RMX_OFF;
6985 		state->underscan_enable = false;
6986 		state->underscan_hborder = 0;
6987 		state->underscan_vborder = 0;
6988 		state->base.max_requested_bpc = 8;
6989 		state->vcpi_slots = 0;
6990 		state->pbn = 0;
6991 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6992 			state->abm_level = amdgpu_dm_abm_level;
6993 
6994 		__drm_atomic_helper_connector_reset(connector, &state->base);
6995 	}
6996 }
6997 
6998 struct drm_connector_state *
6999 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7000 {
7001 	struct dm_connector_state *state =
7002 		to_dm_connector_state(connector->state);
7003 
7004 	struct dm_connector_state *new_state =
7005 			kmemdup(state, sizeof(*state), GFP_KERNEL);
7006 
7007 	if (!new_state)
7008 		return NULL;
7009 
7010 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7011 
7012 	new_state->freesync_capable = state->freesync_capable;
7013 	new_state->abm_level = state->abm_level;
7014 	new_state->scaling = state->scaling;
7015 	new_state->underscan_enable = state->underscan_enable;
7016 	new_state->underscan_hborder = state->underscan_hborder;
7017 	new_state->underscan_vborder = state->underscan_vborder;
7018 	new_state->vcpi_slots = state->vcpi_slots;
7019 	new_state->pbn = state->pbn;
7020 	return &new_state->base;
7021 }
7022 
7023 static int
7024 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7025 {
7026 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7027 		to_amdgpu_dm_connector(connector);
7028 	int r;
7029 
7030 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7031 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7032 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7033 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7034 		if (r)
7035 			return r;
7036 	}
7037 
7038 #if defined(CONFIG_DEBUG_FS)
7039 	connector_debugfs_init(amdgpu_dm_connector);
7040 #endif
7041 
7042 	return 0;
7043 }
7044 
7045 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7046 	.reset = amdgpu_dm_connector_funcs_reset,
7047 	.detect = amdgpu_dm_connector_detect,
7048 	.fill_modes = drm_helper_probe_single_connector_modes,
7049 	.destroy = amdgpu_dm_connector_destroy,
7050 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7051 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7052 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7053 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7054 	.late_register = amdgpu_dm_connector_late_register,
7055 	.early_unregister = amdgpu_dm_connector_unregister
7056 };
7057 
7058 static int get_modes(struct drm_connector *connector)
7059 {
7060 	return amdgpu_dm_connector_get_modes(connector);
7061 }
7062 
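/*
 * Create an emulated (virtual) sink from the EDID attached to the connector.
 * This is used when the connector is forced on without a physical sink, e.g.
 * via an override EDID; without an EDID blob the connector is forced off.
 */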
7063 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7064 {
7065 	struct dc_sink_init_data init_params = {
7066 			.link = aconnector->dc_link,
7067 			.sink_signal = SIGNAL_TYPE_VIRTUAL
7068 	};
7069 	struct edid *edid;
7070 
7071 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7073 				aconnector->base.name);
7074 
7075 		aconnector->base.force = DRM_FORCE_OFF;
7076 		aconnector->base.override_edid = false;
7077 		return;
7078 	}
7079 
7080 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7081 
7082 	aconnector->edid = edid;
7083 
7084 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7085 		aconnector->dc_link,
7086 		(uint8_t *)edid,
7087 		(edid->extensions + 1) * EDID_LENGTH,
7088 		&init_params);
7089 
7090 	if (aconnector->base.force == DRM_FORCE_ON) {
7091 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7092 		aconnector->dc_link->local_sink :
7093 		aconnector->dc_em_sink;
7094 		dc_sink_retain(aconnector->dc_sink);
7095 	}
7096 }
7097 
7098 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7099 {
7100 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7101 
7102 	/*
7103 	 * In case of headless boot with force on for DP managed connector
7104 	 * Those settings have to be != 0 to get initial modeset
7105 	 */
7106 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7107 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7108 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7109 	}
7112 	aconnector->base.override_edid = true;
7113 	create_eml_sink(aconnector);
7114 }
7115 
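/*
 * Create a stream for the given mode and validate it against DC. On
 * validation failure, retry with a lower bpc (down to 6), and as a last
 * resort retry once more with YCbCr420 forced when encoder validation fails.
 */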
7116 struct dc_stream_state *
7117 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7118 				const struct drm_display_mode *drm_mode,
7119 				const struct dm_connector_state *dm_state,
7120 				const struct dc_stream_state *old_stream)
7121 {
7122 	struct drm_connector *connector = &aconnector->base;
7123 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7124 	struct dc_stream_state *stream;
7125 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7126 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7127 	enum dc_status dc_result = DC_OK;
7128 
7129 	do {
7130 		stream = create_stream_for_sink(aconnector, drm_mode,
7131 						dm_state, old_stream,
7132 						requested_bpc);
7133 		if (stream == NULL) {
7134 			DRM_ERROR("Failed to create stream for sink!\n");
7135 			break;
7136 		}
7137 
7138 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7139 
7140 		if (dc_result != DC_OK) {
7141 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7142 				      drm_mode->hdisplay,
7143 				      drm_mode->vdisplay,
7144 				      drm_mode->clock,
7145 				      dc_result,
7146 				      dc_status_to_str(dc_result));
7147 
7148 			dc_stream_release(stream);
7149 			stream = NULL;
7150 			requested_bpc -= 2; /* lower bpc to retry validation */
7151 		}
7152 
7153 	} while (stream == NULL && requested_bpc >= 6);
7154 
7155 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7156 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7157 
7158 		aconnector->force_yuv420_output = true;
7159 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7160 						dm_state, old_stream);
7161 		aconnector->force_yuv420_output = false;
7162 	}
7163 
7164 	return stream;
7165 }
7166 
7167 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7168 				   struct drm_display_mode *mode)
7169 {
7170 	int result = MODE_ERROR;
7171 	struct dc_sink *dc_sink;
7172 	/* TODO: Unhardcode stream count */
7173 	struct dc_stream_state *stream;
7174 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7175 
7176 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7177 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7178 		return result;
7179 
7180 	/*
7181 	 * Only run this the first time mode_valid is called to initilialize
7182 	 * EDID mgmt
7183 	 */
7184 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7185 		!aconnector->dc_em_sink)
7186 		handle_edid_mgmt(aconnector);
7187 
7188 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7189 
7190 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7191 				aconnector->base.force != DRM_FORCE_ON) {
7192 		DRM_ERROR("dc_sink is NULL!\n");
7193 		goto fail;
7194 	}
7195 
7196 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7197 	if (stream) {
7198 		dc_stream_release(stream);
7199 		result = MODE_OK;
7200 	}
7201 
7202 fail:
	/* TODO: error handling */
7204 	return result;
7205 }
7206 
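/*
 * Pack the HDR static metadata from the connector state into a DC info
 * packet: an HDMI DRM (Dynamic Range and Mastering) infoframe for HDMI, or
 * the equivalent SDP for DP/eDP. Returns 0 with out->valid cleared when no
 * metadata is set.
 */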
7207 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7208 				struct dc_info_packet *out)
7209 {
7210 	struct hdmi_drm_infoframe frame;
7211 	unsigned char buf[30]; /* 26 + 4 */
7212 	ssize_t len;
7213 	int ret, i;
7214 
7215 	memset(out, 0, sizeof(*out));
7216 
7217 	if (!state->hdr_output_metadata)
7218 		return 0;
7219 
7220 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7221 	if (ret)
7222 		return ret;
7223 
7224 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7225 	if (len < 0)
7226 		return (int)len;
7227 
7228 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7229 	if (len != 30)
7230 		return -EINVAL;
7231 
7232 	/* Prepare the infopacket for DC. */
7233 	switch (state->connector->connector_type) {
7234 	case DRM_MODE_CONNECTOR_HDMIA:
7235 		out->hb0 = 0x87; /* type */
7236 		out->hb1 = 0x01; /* version */
7237 		out->hb2 = 0x1A; /* length */
7238 		out->sb[0] = buf[3]; /* checksum */
7239 		i = 1;
7240 		break;
7241 
7242 	case DRM_MODE_CONNECTOR_DisplayPort:
7243 	case DRM_MODE_CONNECTOR_eDP:
7244 		out->hb0 = 0x00; /* sdp id, zero */
7245 		out->hb1 = 0x87; /* type */
7246 		out->hb2 = 0x1D; /* payload len - 1 */
7247 		out->hb3 = (0x13 << 2); /* sdp version */
7248 		out->sb[0] = 0x01; /* version */
7249 		out->sb[1] = 0x1A; /* length */
7250 		i = 2;
7251 		break;
7252 
7253 	default:
7254 		return -EINVAL;
7255 	}
7256 
7257 	memcpy(&out->sb[i], &buf[4], 26);
7258 	out->valid = true;
7259 
7260 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7261 		       sizeof(out->sb), false);
7262 
7263 	return 0;
7264 }
7265 
7266 static int
7267 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7268 				 struct drm_atomic_state *state)
7269 {
7270 	struct drm_connector_state *new_con_state =
7271 		drm_atomic_get_new_connector_state(state, conn);
7272 	struct drm_connector_state *old_con_state =
7273 		drm_atomic_get_old_connector_state(state, conn);
7274 	struct drm_crtc *crtc = new_con_state->crtc;
7275 	struct drm_crtc_state *new_crtc_state;
7276 	int ret;
7277 
7278 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7279 
7280 	if (!crtc)
7281 		return 0;
7282 
7283 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7284 		struct dc_info_packet hdr_infopacket;
7285 
7286 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7287 		if (ret)
7288 			return ret;
7289 
7290 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7291 		if (IS_ERR(new_crtc_state))
7292 			return PTR_ERR(new_crtc_state);
7293 
7294 		/*
7295 		 * DC considers the stream backends changed if the
7296 		 * static metadata changes. Forcing the modeset also
7297 		 * gives a simple way for userspace to switch from
7298 		 * 8bpc to 10bpc when setting the metadata to enter
7299 		 * or exit HDR.
7300 		 *
7301 		 * Changing the static metadata after it's been
7302 		 * set is permissible, however. So only force a
7303 		 * modeset if we're entering or exiting HDR.
7304 		 */
7305 		new_crtc_state->mode_changed =
7306 			!old_con_state->hdr_output_metadata ||
7307 			!new_con_state->hdr_output_metadata;
7308 	}
7309 
7310 	return 0;
7311 }
7312 
7313 static const struct drm_connector_helper_funcs
7314 amdgpu_dm_connector_helper_funcs = {
7315 	/*
7316 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7317 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7318 	 * are missing after user start lightdm. So we need to renew modes list.
7319 	 * in get_modes call back, not just return the modes count
7320 	 */
7321 	.get_modes = get_modes,
7322 	.mode_valid = amdgpu_dm_connector_mode_valid,
7323 	.atomic_check = amdgpu_dm_connector_atomic_check,
7324 };
7325 
7326 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7327 {
7328 }
7329 
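/*
 * Count the number of enabled, non-cursor planes on a CRTC. Planes that are
 * part of the atomic state must have a framebuffer to count as enabled;
 * untouched planes are assumed to have kept their previously validated state.
 */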
7330 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7331 {
7332 	struct drm_atomic_state *state = new_crtc_state->state;
7333 	struct drm_plane *plane;
7334 	int num_active = 0;
7335 
7336 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7337 		struct drm_plane_state *new_plane_state;
7338 
7339 		/* Cursor planes are "fake". */
7340 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7341 			continue;
7342 
7343 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7344 
7345 		if (!new_plane_state) {
7346 			/*
7347 			 * The plane is enable on the CRTC and hasn't changed
7348 			 * state. This means that it previously passed
7349 			 * validation and is therefore enabled.
7350 			 */
7351 			num_active += 1;
7352 			continue;
7353 		}
7354 
7355 		/* We need a framebuffer to be considered enabled. */
7356 		num_active += (new_plane_state->fb != NULL);
7357 	}
7358 
7359 	return num_active;
7360 }
7361 
7362 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7363 					 struct drm_crtc_state *new_crtc_state)
7364 {
7365 	struct dm_crtc_state *dm_new_crtc_state =
7366 		to_dm_crtc_state(new_crtc_state);
7367 
7368 	dm_new_crtc_state->active_planes = 0;
7369 
7370 	if (!dm_new_crtc_state->stream)
7371 		return;
7372 
7373 	dm_new_crtc_state->active_planes =
7374 		count_crtc_active_planes(new_crtc_state);
7375 }
7376 
7377 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7378 				       struct drm_atomic_state *state)
7379 {
7380 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7381 									  crtc);
7382 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7383 	struct dc *dc = adev->dm.dc;
7384 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7385 	int ret = -EINVAL;
7386 
7387 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7388 
7389 	dm_update_crtc_active_planes(crtc, crtc_state);
7390 
7391 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7392 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7393 		return ret;
7394 	}
7395 
7396 	/*
7397 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7398 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7399 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7400 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7401 	 */
7402 	if (crtc_state->enable &&
7403 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7404 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7405 		return -EINVAL;
7406 	}
7407 
7408 	/* In some use cases, like reset, no stream is attached */
7409 	if (!dm_crtc_state->stream)
7410 		return 0;
7411 
7412 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7413 		return 0;
7414 
7415 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7416 	return ret;
7417 }
7418 
7419 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7420 				      const struct drm_display_mode *mode,
7421 				      struct drm_display_mode *adjusted_mode)
7422 {
7423 	return true;
7424 }
7425 
7426 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7427 	.disable = dm_crtc_helper_disable,
7428 	.atomic_check = dm_crtc_helper_atomic_check,
7429 	.mode_fixup = dm_crtc_helper_mode_fixup,
7430 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7431 };
7432 
7433 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7434 {
7435 
7436 }
7437 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7458 
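/*
 * For MST connectors, compute the PBN for the requested mode from its pixel
 * clock and effective bpp, then reserve the corresponding number of VCPI
 * slots in the atomic state.
 */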
7459 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7460 					  struct drm_crtc_state *crtc_state,
7461 					  struct drm_connector_state *conn_state)
7462 {
7463 	struct drm_atomic_state *state = crtc_state->state;
7464 	struct drm_connector *connector = conn_state->connector;
7465 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7466 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7467 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7468 	struct drm_dp_mst_topology_mgr *mst_mgr;
7469 	struct drm_dp_mst_port *mst_port;
7470 	enum dc_color_depth color_depth;
7471 	int clock, bpp = 0;
7472 	bool is_y420 = false;
7473 
7474 	if (!aconnector->port || !aconnector->dc_sink)
7475 		return 0;
7476 
7477 	mst_port = aconnector->port;
7478 	mst_mgr = &aconnector->mst_port->mst_mgr;
7479 
7480 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7481 		return 0;
7482 
7483 	if (!state->duplicated) {
7484 		int max_bpc = conn_state->max_requested_bpc;
7485 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7486 				aconnector->force_yuv420_output;
7487 		color_depth = convert_color_depth_from_display_info(connector,
7488 								    is_y420,
7489 								    max_bpc);
7490 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7491 		clock = adjusted_mode->clock;
7492 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7493 	}
7494 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7495 									   mst_mgr,
7496 									   mst_port,
7497 									   dm_new_connector_state->pbn,
7498 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7499 	if (dm_new_connector_state->vcpi_slots < 0) {
7500 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7501 		return dm_new_connector_state->vcpi_slots;
7502 	}
7503 	return 0;
7504 }
7505 
7506 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7507 	.disable = dm_encoder_helper_disable,
7508 	.atomic_check = dm_encoder_helper_atomic_check
7509 };
7510 
7511 #if defined(CONFIG_DRM_AMD_DC_DCN)
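/*
 * Propagate the PBN values computed by compute_mst_dsc_configs_for_state()
 * into the connector states and enable or disable DSC on each MST port
 * accordingly.
 */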
7512 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7513 					    struct dc_state *dc_state,
7514 					    struct dsc_mst_fairness_vars *vars)
7515 {
7516 	struct dc_stream_state *stream = NULL;
7517 	struct drm_connector *connector;
7518 	struct drm_connector_state *new_con_state;
7519 	struct amdgpu_dm_connector *aconnector;
7520 	struct dm_connector_state *dm_conn_state;
7521 	int i, j;
7522 	int vcpi, pbn_div, pbn, slot_num = 0;
7523 
7524 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7525 
7526 		aconnector = to_amdgpu_dm_connector(connector);
7527 
7528 		if (!aconnector->port)
7529 			continue;
7530 
7531 		if (!new_con_state || !new_con_state->crtc)
7532 			continue;
7533 
7534 		dm_conn_state = to_dm_connector_state(new_con_state);
7535 
7536 		for (j = 0; j < dc_state->stream_count; j++) {
7537 			stream = dc_state->streams[j];
7538 			if (!stream)
7539 				continue;
7540 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7542 				break;
7543 
7544 			stream = NULL;
7545 		}
7546 
7547 		if (!stream)
7548 			continue;
7549 
7550 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7552 		for (j = 0; j < dc_state->stream_count; j++) {
7553 			if (vars[j].aconnector == aconnector) {
7554 				pbn = vars[j].pbn;
7555 				break;
7556 			}
7557 		}
7558 
7559 		if (j == dc_state->stream_count)
7560 			continue;
7561 
7562 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7563 
7564 		if (stream->timing.flags.DSC != 1) {
7565 			dm_conn_state->pbn = pbn;
7566 			dm_conn_state->vcpi_slots = slot_num;
7567 
7568 			drm_dp_mst_atomic_enable_dsc(state,
7569 						     aconnector->port,
7570 						     dm_conn_state->pbn,
7571 						     0,
7572 						     false);
7573 			continue;
7574 		}
7575 
7576 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7577 						    aconnector->port,
7578 						    pbn, pbn_div,
7579 						    true);
7580 		if (vcpi < 0)
7581 			return vcpi;
7582 
7583 		dm_conn_state->pbn = pbn;
7584 		dm_conn_state->vcpi_slots = vcpi;
7585 	}
7586 	return 0;
7587 }
7588 #endif
7589 
7590 static void dm_drm_plane_reset(struct drm_plane *plane)
7591 {
7592 	struct dm_plane_state *amdgpu_state = NULL;
7593 
7594 	if (plane->state)
7595 		plane->funcs->atomic_destroy_state(plane, plane->state);
7596 
7597 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7598 	WARN_ON(amdgpu_state == NULL);
7599 
7600 	if (amdgpu_state)
7601 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7602 }
7603 
7604 static struct drm_plane_state *
7605 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7606 {
7607 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7608 
7609 	old_dm_plane_state = to_dm_plane_state(plane->state);
7610 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7611 	if (!dm_plane_state)
7612 		return NULL;
7613 
7614 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7615 
7616 	if (old_dm_plane_state->dc_state) {
7617 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7618 		dc_plane_state_retain(dm_plane_state->dc_state);
7619 	}
7620 
7621 	return &dm_plane_state->base;
7622 }
7623 
7624 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7625 				struct drm_plane_state *state)
7626 {
7627 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7628 
7629 	if (dm_plane_state->dc_state)
7630 		dc_plane_state_release(dm_plane_state->dc_state);
7631 
7632 	drm_atomic_helper_plane_destroy_state(plane, state);
7633 }
7634 
7635 static const struct drm_plane_funcs dm_plane_funcs = {
7636 	.update_plane	= drm_atomic_helper_update_plane,
7637 	.disable_plane	= drm_atomic_helper_disable_plane,
7638 	.destroy	= drm_primary_helper_destroy,
7639 	.reset = dm_drm_plane_reset,
7640 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7641 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7642 	.format_mod_supported = dm_plane_format_mod_supported,
7643 };
7644 
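/*
 * Pin the buffer object backing the new framebuffer (VRAM for cursors,
 * otherwise any supported domain), map it into GART and record its GPU
 * address so DC can program the plane, filling in buffer attributes for
 * newly created plane states.
 */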
7645 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7646 				      struct drm_plane_state *new_state)
7647 {
7648 	struct amdgpu_framebuffer *afb;
7649 	struct drm_gem_object *obj;
7650 	struct amdgpu_device *adev;
7651 	struct amdgpu_bo *rbo;
7652 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7653 	uint32_t domain;
7654 	int r;
7655 
7656 	if (!new_state->fb) {
7657 		DRM_DEBUG_KMS("No FB bound\n");
7658 		return 0;
7659 	}
7660 
7661 	afb = to_amdgpu_framebuffer(new_state->fb);
7662 	obj = new_state->fb->obj[0];
7663 	rbo = gem_to_amdgpu_bo(obj);
7664 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7665 
7666 	r = amdgpu_bo_reserve(rbo, true);
7667 	if (r) {
7668 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7669 		return r;
7670 	}
7671 
7672 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7673 	if (r) {
7674 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7675 		goto error_unlock;
7676 	}
7677 
7678 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7679 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7680 	else
7681 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7682 
7683 	r = amdgpu_bo_pin(rbo, domain);
7684 	if (unlikely(r != 0)) {
7685 		if (r != -ERESTARTSYS)
7686 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7687 		goto error_unlock;
7688 	}
7689 
7690 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7691 	if (unlikely(r != 0)) {
7692 		DRM_ERROR("%p bind failed\n", rbo);
7693 		goto error_unpin;
7694 	}
7695 
7696 	amdgpu_bo_unreserve(rbo);
7697 
7698 	afb->address = amdgpu_bo_gpu_offset(rbo);
7699 
7700 	amdgpu_bo_ref(rbo);
7701 
7702 	/**
7703 	 * We don't do surface updates on planes that have been newly created,
7704 	 * but we also don't have the afb->address during atomic check.
7705 	 *
7706 	 * Fill in buffer attributes depending on the address here, but only on
7707 	 * newly created planes since they're not being used by DC yet and this
7708 	 * won't modify global state.
7709 	 */
7710 	dm_plane_state_old = to_dm_plane_state(plane->state);
7711 	dm_plane_state_new = to_dm_plane_state(new_state);
7712 
7713 	if (dm_plane_state_new->dc_state &&
7714 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7715 		struct dc_plane_state *plane_state =
7716 			dm_plane_state_new->dc_state;
7717 		bool force_disable_dcc = !plane_state->dcc.enable;
7718 
7719 		fill_plane_buffer_attributes(
7720 			adev, afb, plane_state->format, plane_state->rotation,
7721 			afb->tiling_flags,
7722 			&plane_state->tiling_info, &plane_state->plane_size,
7723 			&plane_state->dcc, &plane_state->address,
7724 			afb->tmz_surface, force_disable_dcc);
7725 	}
7726 
7727 	return 0;
7728 
7729 error_unpin:
7730 	amdgpu_bo_unpin(rbo);
7731 
7732 error_unlock:
7733 	amdgpu_bo_unreserve(rbo);
7734 	return r;
7735 }
7736 
7737 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7738 				       struct drm_plane_state *old_state)
7739 {
7740 	struct amdgpu_bo *rbo;
7741 	int r;
7742 
7743 	if (!old_state->fb)
7744 		return;
7745 
7746 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7747 	r = amdgpu_bo_reserve(rbo, false);
7748 	if (unlikely(r)) {
7749 		DRM_ERROR("failed to reserve rbo before unpin\n");
7750 		return;
7751 	}
7752 
7753 	amdgpu_bo_unpin(rbo);
7754 	amdgpu_bo_unreserve(rbo);
7755 	amdgpu_bo_unref(&rbo);
7756 }
7757 
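/*
 * Validate the plane's viewport against the CRTC mode and clamp scaling to
 * the limits reported by the DC plane caps, converted from DC's dst/src
 * 1.0 == 1000 convention to DRM's 16.16 fixed-point src/dst ratios.
 */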
7758 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7759 				       struct drm_crtc_state *new_crtc_state)
7760 {
7761 	struct drm_framebuffer *fb = state->fb;
7762 	int min_downscale, max_upscale;
7763 	int min_scale = 0;
7764 	int max_scale = INT_MAX;
7765 
7766 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7767 	if (fb && state->crtc) {
7768 		/* Validate viewport to cover the case when only the position changes */
7769 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7770 			int viewport_width = state->crtc_w;
7771 			int viewport_height = state->crtc_h;
7772 
7773 			if (state->crtc_x < 0)
7774 				viewport_width += state->crtc_x;
7775 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7776 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7777 
7778 			if (state->crtc_y < 0)
7779 				viewport_height += state->crtc_y;
7780 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7781 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7782 
7783 			if (viewport_width < 0 || viewport_height < 0) {
7784 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7785 				return -EINVAL;
7786 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7787 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7788 				return -EINVAL;
7789 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7790 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7791 				return -EINVAL;
7792 			}
7793 
7794 		}
7795 
7796 		/* Get min/max allowed scaling factors from plane caps. */
7797 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7798 					     &min_downscale, &max_upscale);
7799 		/*
7800 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7801 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7802 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7803 		 */
7804 		min_scale = (1000 << 16) / max_upscale;
7805 		max_scale = (1000 << 16) / min_downscale;
7806 	}
7807 
7808 	return drm_atomic_helper_check_plane_state(
7809 		state, new_crtc_state, min_scale, max_scale, true, true);
7810 }
7811 
7812 static int dm_plane_atomic_check(struct drm_plane *plane,
7813 				 struct drm_atomic_state *state)
7814 {
7815 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7816 										 plane);
7817 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7818 	struct dc *dc = adev->dm.dc;
7819 	struct dm_plane_state *dm_plane_state;
7820 	struct dc_scaling_info scaling_info;
7821 	struct drm_crtc_state *new_crtc_state;
7822 	int ret;
7823 
7824 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7825 
7826 	dm_plane_state = to_dm_plane_state(new_plane_state);
7827 
7828 	if (!dm_plane_state->dc_state)
7829 		return 0;
7830 
7831 	new_crtc_state =
7832 		drm_atomic_get_new_crtc_state(state,
7833 					      new_plane_state->crtc);
7834 	if (!new_crtc_state)
7835 		return -EINVAL;
7836 
7837 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7838 	if (ret)
7839 		return ret;
7840 
7841 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7842 	if (ret)
7843 		return ret;
7844 
7845 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7846 		return 0;
7847 
7848 	return -EINVAL;
7849 }
7850 
7851 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7852 				       struct drm_atomic_state *state)
7853 {
7854 	/* Only support async updates on cursor planes. */
7855 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7856 		return -EINVAL;
7857 
7858 	return 0;
7859 }
7860 
7861 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7862 					 struct drm_atomic_state *state)
7863 {
7864 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7865 									   plane);
7866 	struct drm_plane_state *old_state =
7867 		drm_atomic_get_old_plane_state(state, plane);
7868 
7869 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7870 
7871 	swap(plane->state->fb, new_state->fb);
7872 
7873 	plane->state->src_x = new_state->src_x;
7874 	plane->state->src_y = new_state->src_y;
7875 	plane->state->src_w = new_state->src_w;
7876 	plane->state->src_h = new_state->src_h;
7877 	plane->state->crtc_x = new_state->crtc_x;
7878 	plane->state->crtc_y = new_state->crtc_y;
7879 	plane->state->crtc_w = new_state->crtc_w;
7880 	plane->state->crtc_h = new_state->crtc_h;
7881 
7882 	handle_cursor_update(plane, old_state);
7883 }
7884 
7885 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7886 	.prepare_fb = dm_plane_helper_prepare_fb,
7887 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7888 	.atomic_check = dm_plane_atomic_check,
7889 	.atomic_async_check = dm_plane_atomic_async_check,
7890 	.atomic_async_update = dm_plane_atomic_async_update
7891 };
7892 
7893 /*
7894  * TODO: these are currently initialized to rgb formats only.
7895  * For future use cases we should either initialize them dynamically based on
7896  * plane capabilities, or initialize this array to all formats, so internal drm
7897  * check will succeed, and let DC implement proper check
7898  */
7899 static const uint32_t rgb_formats[] = {
7900 	DRM_FORMAT_XRGB8888,
7901 	DRM_FORMAT_ARGB8888,
7902 	DRM_FORMAT_RGBA8888,
7903 	DRM_FORMAT_XRGB2101010,
7904 	DRM_FORMAT_XBGR2101010,
7905 	DRM_FORMAT_ARGB2101010,
7906 	DRM_FORMAT_ABGR2101010,
7907 	DRM_FORMAT_XRGB16161616,
7908 	DRM_FORMAT_XBGR16161616,
7909 	DRM_FORMAT_ARGB16161616,
7910 	DRM_FORMAT_ABGR16161616,
7911 	DRM_FORMAT_XBGR8888,
7912 	DRM_FORMAT_ABGR8888,
7913 	DRM_FORMAT_RGB565,
7914 };
7915 
7916 static const uint32_t overlay_formats[] = {
7917 	DRM_FORMAT_XRGB8888,
7918 	DRM_FORMAT_ARGB8888,
7919 	DRM_FORMAT_RGBA8888,
7920 	DRM_FORMAT_XBGR8888,
7921 	DRM_FORMAT_ABGR8888,
7922 	DRM_FORMAT_RGB565
7923 };
7924 
7925 static const u32 cursor_formats[] = {
7926 	DRM_FORMAT_ARGB8888
7927 };
7928 
7929 static int get_plane_formats(const struct drm_plane *plane,
7930 			     const struct dc_plane_cap *plane_cap,
7931 			     uint32_t *formats, int max_formats)
7932 {
7933 	int i, num_formats = 0;
7934 
7935 	/*
7936 	 * TODO: Query support for each group of formats directly from
7937 	 * DC plane caps. This will require adding more formats to the
7938 	 * caps list.
7939 	 */
7940 
7941 	switch (plane->type) {
7942 	case DRM_PLANE_TYPE_PRIMARY:
7943 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7944 			if (num_formats >= max_formats)
7945 				break;
7946 
7947 			formats[num_formats++] = rgb_formats[i];
7948 		}
7949 
7950 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7951 			formats[num_formats++] = DRM_FORMAT_NV12;
7952 		if (plane_cap && plane_cap->pixel_format_support.p010)
7953 			formats[num_formats++] = DRM_FORMAT_P010;
7954 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7955 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7956 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7957 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7958 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7959 		}
7960 		break;
7961 
7962 	case DRM_PLANE_TYPE_OVERLAY:
7963 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7964 			if (num_formats >= max_formats)
7965 				break;
7966 
7967 			formats[num_formats++] = overlay_formats[i];
7968 		}
7969 		break;
7970 
7971 	case DRM_PLANE_TYPE_CURSOR:
7972 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7973 			if (num_formats >= max_formats)
7974 				break;
7975 
7976 			formats[num_formats++] = cursor_formats[i];
7977 		}
7978 		break;
7979 	}
7980 
7981 	return num_formats;
7982 }
7983 
7984 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7985 				struct drm_plane *plane,
7986 				unsigned long possible_crtcs,
7987 				const struct dc_plane_cap *plane_cap)
7988 {
7989 	uint32_t formats[32];
7990 	int num_formats;
7991 	int res = -EPERM;
7992 	unsigned int supported_rotations;
7993 	uint64_t *modifiers = NULL;
7994 
7995 	num_formats = get_plane_formats(plane, plane_cap, formats,
7996 					ARRAY_SIZE(formats));
7997 
7998 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7999 	if (res)
8000 		return res;
8001 
8002 	if (modifiers == NULL)
8003 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8004 
8005 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
8006 				       &dm_plane_funcs, formats, num_formats,
8007 				       modifiers, plane->type, NULL);
8008 	kfree(modifiers);
8009 	if (res)
8010 		return res;
8011 
8012 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8013 	    plane_cap && plane_cap->per_pixel_alpha) {
8014 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
8015 					  BIT(DRM_MODE_BLEND_PREMULTI) |
8016 					  BIT(DRM_MODE_BLEND_COVERAGE);
8017 
8018 		drm_plane_create_alpha_property(plane);
8019 		drm_plane_create_blend_mode_property(plane, blend_caps);
8020 	}
8021 
8022 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
8023 	    plane_cap &&
8024 	    (plane_cap->pixel_format_support.nv12 ||
8025 	     plane_cap->pixel_format_support.p010)) {
8026 		/* This only affects YUV formats. */
8027 		drm_plane_create_color_properties(
8028 			plane,
8029 			BIT(DRM_COLOR_YCBCR_BT601) |
8030 			BIT(DRM_COLOR_YCBCR_BT709) |
8031 			BIT(DRM_COLOR_YCBCR_BT2020),
8032 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8033 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8034 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8035 	}
8036 
8037 	supported_rotations =
8038 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8039 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8040 
8041 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
8042 	    plane->type != DRM_PLANE_TYPE_CURSOR)
8043 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8044 						   supported_rotations);
8045 
8046 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8047 
8048 	/* Create (reset) the plane state */
8049 	if (plane->funcs->reset)
8050 		plane->funcs->reset(plane);
8051 
8052 	return 0;
8053 }
8054 
8055 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8056 			       struct drm_plane *plane,
8057 			       uint32_t crtc_index)
8058 {
8059 	struct amdgpu_crtc *acrtc = NULL;
8060 	struct drm_plane *cursor_plane;
8061 
8062 	int res = -ENOMEM;
8063 
8064 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8065 	if (!cursor_plane)
8066 		goto fail;
8067 
8068 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8069 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
8070 
8071 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8072 	if (!acrtc)
8073 		goto fail;
8074 
8075 	res = drm_crtc_init_with_planes(
8076 			dm->ddev,
8077 			&acrtc->base,
8078 			plane,
8079 			cursor_plane,
8080 			&amdgpu_dm_crtc_funcs, NULL);
8081 
8082 	if (res)
8083 		goto fail;
8084 
8085 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8086 
	/* Create (reset) the CRTC state */
8088 	if (acrtc->base.funcs->reset)
8089 		acrtc->base.funcs->reset(&acrtc->base);
8090 
8091 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8092 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8093 
8094 	acrtc->crtc_id = crtc_index;
8095 	acrtc->base.enabled = false;
8096 	acrtc->otg_inst = -1;
8097 
8098 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8099 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8100 				   true, MAX_COLOR_LUT_ENTRIES);
8101 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8102 
8103 	return 0;
8104 
8105 fail:
8106 	kfree(acrtc);
8107 	kfree(cursor_plane);
8108 	return res;
8109 }
8110 
8111 
8112 static int to_drm_connector_type(enum signal_type st)
8113 {
8114 	switch (st) {
8115 	case SIGNAL_TYPE_HDMI_TYPE_A:
8116 		return DRM_MODE_CONNECTOR_HDMIA;
8117 	case SIGNAL_TYPE_EDP:
8118 		return DRM_MODE_CONNECTOR_eDP;
8119 	case SIGNAL_TYPE_LVDS:
8120 		return DRM_MODE_CONNECTOR_LVDS;
8121 	case SIGNAL_TYPE_RGB:
8122 		return DRM_MODE_CONNECTOR_VGA;
8123 	case SIGNAL_TYPE_DISPLAY_PORT:
8124 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8125 		return DRM_MODE_CONNECTOR_DisplayPort;
8126 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8127 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8128 		return DRM_MODE_CONNECTOR_DVID;
8129 	case SIGNAL_TYPE_VIRTUAL:
8130 		return DRM_MODE_CONNECTOR_VIRTUAL;
8131 
8132 	default:
8133 		return DRM_MODE_CONNECTOR_Unknown;
8134 	}
8135 }
8136 
8137 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8138 {
8139 	struct drm_encoder *encoder;
8140 
8141 	/* There is only one encoder per connector */
8142 	drm_connector_for_each_possible_encoder(connector, encoder)
8143 		return encoder;
8144 
8145 	return NULL;
8146 }
8147 
8148 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8149 {
8150 	struct drm_encoder *encoder;
8151 	struct amdgpu_encoder *amdgpu_encoder;
8152 
8153 	encoder = amdgpu_dm_connector_to_encoder(connector);
8154 
8155 	if (encoder == NULL)
8156 		return;
8157 
8158 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8159 
8160 	amdgpu_encoder->native_mode.clock = 0;
8161 
8162 	if (!list_empty(&connector->probed_modes)) {
8163 		struct drm_display_mode *preferred_mode = NULL;
8164 
8165 		list_for_each_entry(preferred_mode,
8166 				    &connector->probed_modes,
8167 				    head) {
8168 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8169 				amdgpu_encoder->native_mode = *preferred_mode;
8170 
8171 			break;
8172 		}
8173 
8174 	}
8175 }
8176 
8177 static struct drm_display_mode *
8178 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8179 			     char *name,
8180 			     int hdisplay, int vdisplay)
8181 {
8182 	struct drm_device *dev = encoder->dev;
8183 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8184 	struct drm_display_mode *mode = NULL;
8185 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8186 
8187 	mode = drm_mode_duplicate(dev, native_mode);
8188 
8189 	if (mode == NULL)
8190 		return NULL;
8191 
8192 	mode->hdisplay = hdisplay;
8193 	mode->vdisplay = vdisplay;
8194 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8195 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8196 
8197 	return mode;
}
8200 
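/*
 * Add a set of common modes (640x480 up to 1920x1200) that are smaller than
 * the encoder's native mode and not already in the probed list, so userspace
 * has sane scaled alternatives to pick from.
 */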
8201 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8202 						 struct drm_connector *connector)
8203 {
8204 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8205 	struct drm_display_mode *mode = NULL;
8206 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8207 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8208 				to_amdgpu_dm_connector(connector);
8209 	int i;
8210 	int n;
8211 	struct mode_size {
8212 		char name[DRM_DISPLAY_MODE_LEN];
8213 		int w;
8214 		int h;
8215 	} common_modes[] = {
8216 		{  "640x480",  640,  480},
8217 		{  "800x600",  800,  600},
8218 		{ "1024x768", 1024,  768},
8219 		{ "1280x720", 1280,  720},
8220 		{ "1280x800", 1280,  800},
8221 		{"1280x1024", 1280, 1024},
8222 		{ "1440x900", 1440,  900},
8223 		{"1680x1050", 1680, 1050},
8224 		{"1600x1200", 1600, 1200},
8225 		{"1920x1080", 1920, 1080},
8226 		{"1920x1200", 1920, 1200}
8227 	};
8228 
8229 	n = ARRAY_SIZE(common_modes);
8230 
8231 	for (i = 0; i < n; i++) {
8232 		struct drm_display_mode *curmode = NULL;
8233 		bool mode_existed = false;
8234 
8235 		if (common_modes[i].w > native_mode->hdisplay ||
8236 		    common_modes[i].h > native_mode->vdisplay ||
8237 		   (common_modes[i].w == native_mode->hdisplay &&
8238 		    common_modes[i].h == native_mode->vdisplay))
8239 			continue;
8240 
8241 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8242 			if (common_modes[i].w == curmode->hdisplay &&
8243 			    common_modes[i].h == curmode->vdisplay) {
8244 				mode_existed = true;
8245 				break;
8246 			}
8247 		}
8248 
8249 		if (mode_existed)
8250 			continue;
8251 
8252 		mode = amdgpu_dm_create_common_mode(encoder,
8253 				common_modes[i].name, common_modes[i].w,
8254 				common_modes[i].h);
8255 		if (!mode)
8256 			continue;
8257 
8258 		drm_mode_probed_add(connector, mode);
8259 		amdgpu_dm_connector->num_modes++;
8260 	}
8261 }
8262 
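/*
 * Apply any panel-orientation quirk for internal panels (eDP/LVDS) based on
 * the native mode dimensions.
 */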
8263 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8264 {
8265 	struct drm_encoder *encoder;
8266 	struct amdgpu_encoder *amdgpu_encoder;
8267 	const struct drm_display_mode *native_mode;
8268 
8269 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8270 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8271 		return;
8272 
8273 	encoder = amdgpu_dm_connector_to_encoder(connector);
8274 	if (!encoder)
8275 		return;
8276 
8277 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8278 
8279 	native_mode = &amdgpu_encoder->native_mode;
8280 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8281 		return;
8282 
8283 	drm_connector_set_panel_orientation_with_quirk(connector,
8284 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8285 						       native_mode->hdisplay,
8286 						       native_mode->vdisplay);
8287 }
8288 
8289 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8290 					      struct edid *edid)
8291 {
8292 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8293 			to_amdgpu_dm_connector(connector);
8294 
8295 	if (edid) {
8296 		/* empty probed_modes */
8297 		INIT_LIST_HEAD(&connector->probed_modes);
8298 		amdgpu_dm_connector->num_modes =
8299 				drm_add_edid_modes(connector, edid);
8300 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode. Modes later in the probed mode
		 * list could be of a higher, preferred resolution. For
		 * example, a 3840x2160 preferred timing in the base EDID and
		 * a 4096x2160 preferred resolution in a DID extension block
		 * later.
		 */
8309 		drm_mode_sort(&connector->probed_modes);
8310 		amdgpu_dm_get_native_mode(connector);
8311 
8312 		/* Freesync capabilities are reset by calling
8313 		 * drm_add_edid_modes() and need to be
8314 		 * restored here.
8315 		 */
8316 		amdgpu_dm_update_freesync_caps(connector, edid);
8317 
8318 		amdgpu_set_panel_orientation(connector);
8319 	} else {
8320 		amdgpu_dm_connector->num_modes = 0;
8321 	}
8322 }
8323 
8324 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8325 			      struct drm_display_mode *mode)
8326 {
8327 	struct drm_display_mode *m;
8328 
	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8330 		if (drm_mode_equal(m, mode))
8331 			return true;
8332 	}
8333 
8334 	return false;
8335 }
8336 
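/*
 * Add fixed-refresh variants of the highest-refresh probed mode for the
 * common video rates that fall inside the connector's FreeSync range. The
 * variants keep the pixel clock and horizontal timing and only stretch the
 * vertical total, so the panel stays within its VRR window. Returns the
 * number of modes added.
 */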
8337 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8338 {
8339 	const struct drm_display_mode *m;
8340 	struct drm_display_mode *new_mode;
8341 	uint i;
8342 	uint32_t new_modes_count = 0;
8343 
	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
8356 	static const uint32_t common_rates[] = {
8357 		23976, 24000, 25000, 29970, 30000,
8358 		48000, 50000, 60000, 72000, 96000, 120000
8359 	};
8360 
	/*
	 * Find the mode with the highest refresh rate at the same resolution
	 * as the preferred mode. Some monitors report a preferred mode whose
	 * refresh rate is lower than the highest rate they support.
	 */
8366 
8367 	m = get_highest_refresh_rate_mode(aconnector, true);
8368 	if (!m)
8369 		return 0;
8370 
8371 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8372 		uint64_t target_vtotal, target_vtotal_diff;
8373 		uint64_t num, den;
8374 
8375 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8376 			continue;
8377 
8378 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8379 		    common_rates[i] > aconnector->max_vfreq * 1000)
8380 			continue;
8381 
8382 		num = (unsigned long long)m->clock * 1000 * 1000;
8383 		den = common_rates[i] * (unsigned long long)m->htotal;
8384 		target_vtotal = div_u64(num, den);
8385 		target_vtotal_diff = target_vtotal - m->vtotal;
8386 
8387 		/* Check for illegal modes */
8388 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8389 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8390 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8391 			continue;
8392 
8393 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8394 		if (!new_mode)
8395 			goto out;
8396 
8397 		new_mode->vtotal += (u16)target_vtotal_diff;
8398 		new_mode->vsync_start += (u16)target_vtotal_diff;
8399 		new_mode->vsync_end += (u16)target_vtotal_diff;
8400 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8401 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8402 
8403 		if (!is_duplicate_mode(aconnector, new_mode)) {
8404 			drm_mode_probed_add(&aconnector->base, new_mode);
8405 			new_modes_count += 1;
8406 		} else
8407 			drm_mode_destroy(aconnector->base.dev, new_mode);
8408 	}
8409  out:
8410 	return new_modes_count;
8411 }
8412 
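/*
 * Add the FreeSync "common rate" modes when the panel reports a usable VRR
 * range (more than 10 Hz wide).
 */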
8413 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8414 						   struct edid *edid)
8415 {
8416 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8417 		to_amdgpu_dm_connector(connector);
8418 
8419 	if (!edid)
8420 		return;
8421 
8422 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8423 		amdgpu_dm_connector->num_modes +=
8424 			add_fs_modes(amdgpu_dm_connector);
8425 }
8426 
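/*
 * Connector .get_modes() hook: build the mode list from the cached EDID,
 * falling back to the standard no-EDID modes (up to 640x480) when the EDID
 * is missing or invalid.
 */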
8427 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8428 {
8429 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8430 			to_amdgpu_dm_connector(connector);
8431 	struct drm_encoder *encoder;
8432 	struct edid *edid = amdgpu_dm_connector->edid;
8433 
8434 	encoder = amdgpu_dm_connector_to_encoder(connector);
8435 
8436 	if (!drm_edid_is_valid(edid)) {
8437 		amdgpu_dm_connector->num_modes =
8438 				drm_add_modes_noedid(connector, 640, 480);
8439 	} else {
8440 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8441 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8442 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8443 	}
8444 	amdgpu_dm_fbc_init(connector);
8445 
8446 	return amdgpu_dm_connector->num_modes;
8447 }
8448 
8449 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8450 				     struct amdgpu_dm_connector *aconnector,
8451 				     int connector_type,
8452 				     struct dc_link *link,
8453 				     int link_index)
8454 {
8455 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8456 
8457 	/*
8458 	 * Some of the properties below require access to state, like bpc.
8459 	 * Allocate some default initial connector state with our reset helper.
8460 	 */
8461 	if (aconnector->base.funcs->reset)
8462 		aconnector->base.funcs->reset(&aconnector->base);
8463 
8464 	aconnector->connector_id = link_index;
8465 	aconnector->dc_link = link;
8466 	aconnector->base.interlace_allowed = false;
8467 	aconnector->base.doublescan_allowed = false;
8468 	aconnector->base.stereo_allowed = false;
8469 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8470 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8471 	aconnector->audio_inst = -1;
8472 	mutex_init(&aconnector->hpd_lock);
8473 
	/*
	 * Configure HPD hot plug support. The default value of
	 * connector->polled is 0, which means HPD hot plug is not supported.
	 */
8478 	switch (connector_type) {
8479 	case DRM_MODE_CONNECTOR_HDMIA:
8480 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8481 		aconnector->base.ycbcr_420_allowed =
8482 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8483 		break;
8484 	case DRM_MODE_CONNECTOR_DisplayPort:
8485 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8486 		link->link_enc = link_enc_cfg_get_link_enc(link);
8487 		ASSERT(link->link_enc);
8488 		if (link->link_enc)
8489 			aconnector->base.ycbcr_420_allowed =
8490 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8491 		break;
8492 	case DRM_MODE_CONNECTOR_DVID:
8493 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8494 		break;
8495 	default:
8496 		break;
8497 	}
8498 
8499 	drm_object_attach_property(&aconnector->base.base,
8500 				dm->ddev->mode_config.scaling_mode_property,
8501 				DRM_MODE_SCALE_NONE);
8502 
8503 	drm_object_attach_property(&aconnector->base.base,
8504 				adev->mode_info.underscan_property,
8505 				UNDERSCAN_OFF);
8506 	drm_object_attach_property(&aconnector->base.base,
8507 				adev->mode_info.underscan_hborder_property,
8508 				0);
8509 	drm_object_attach_property(&aconnector->base.base,
8510 				adev->mode_info.underscan_vborder_property,
8511 				0);
8512 
8513 	if (!aconnector->mst_port)
8514 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8515 
8516 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8517 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8518 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8519 
8520 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8521 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8522 		drm_object_attach_property(&aconnector->base.base,
8523 				adev->mode_info.abm_level_property, 0);
8524 	}
8525 
8526 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8527 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8528 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8529 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8530 
8531 		if (!aconnector->mst_port)
8532 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8533 
8534 #ifdef CONFIG_DRM_AMD_DC_HDCP
8535 		if (adev->dm.hdcp_workqueue)
8536 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8537 #endif
8538 	}
8539 }
8540 
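/*
 * i2c_algorithm.master_xfer implementation: translate the i2c_msg array into
 * a DC i2c_command and submit it over the link's DDC channel. Returns the
 * number of messages transferred on success and -EIO on failure.
 */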
8541 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8542 			      struct i2c_msg *msgs, int num)
8543 {
8544 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8545 	struct ddc_service *ddc_service = i2c->ddc_service;
8546 	struct i2c_command cmd;
8547 	int i;
8548 	int result = -EIO;
8549 
8550 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8551 
8552 	if (!cmd.payloads)
8553 		return result;
8554 
8555 	cmd.number_of_payloads = num;
8556 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8557 	cmd.speed = 100;
8558 
8559 	for (i = 0; i < num; i++) {
8560 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8561 		cmd.payloads[i].address = msgs[i].addr;
8562 		cmd.payloads[i].length = msgs[i].len;
8563 		cmd.payloads[i].data = msgs[i].buf;
8564 	}
8565 
8566 	if (dc_submit_i2c(
8567 			ddc_service->ctx->dc,
8568 			ddc_service->ddc_pin->hw_info.ddc_channel,
8569 			&cmd))
8570 		result = num;
8571 
8572 	kfree(cmd.payloads);
8573 	return result;
8574 }
8575 
8576 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8577 {
8578 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8579 }
8580 
8581 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8582 	.master_xfer = amdgpu_dm_i2c_xfer,
8583 	.functionality = amdgpu_dm_i2c_func,
8584 };
8585 
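/*
 * Allocate and initialize an i2c adapter wrapping the DC DDC service for the
 * given link; registering the adapter is left to the caller.
 */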
8586 static struct amdgpu_i2c_adapter *
8587 create_i2c(struct ddc_service *ddc_service,
8588 	   int link_index,
8589 	   int *res)
8590 {
8591 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8592 	struct amdgpu_i2c_adapter *i2c;
8593 
8594 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8595 	if (!i2c)
8596 		return NULL;
8597 	i2c->base.owner = THIS_MODULE;
8598 	i2c->base.class = I2C_CLASS_DDC;
8599 	i2c->base.dev.parent = &adev->pdev->dev;
8600 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8601 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8602 	i2c_set_adapdata(&i2c->base, i2c);
8603 	i2c->ddc_service = ddc_service;
8604 	if (i2c->ddc_service->ddc_pin)
8605 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8606 
8607 	return i2c;
8608 }
8609 
8610 
8611 /*
8612  * Note: this function assumes that dc_link_detect() was called for the
8613  * dc_link which will be represented by this aconnector.
8614  */
8615 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8616 				    struct amdgpu_dm_connector *aconnector,
8617 				    uint32_t link_index,
8618 				    struct amdgpu_encoder *aencoder)
8619 {
8620 	int res = 0;
8621 	int connector_type;
8622 	struct dc *dc = dm->dc;
8623 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8624 	struct amdgpu_i2c_adapter *i2c;
8625 
8626 	link->priv = aconnector;
8627 
8628 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8629 
8630 	i2c = create_i2c(link->ddc, link->link_index, &res);
8631 	if (!i2c) {
8632 		DRM_ERROR("Failed to create i2c adapter data\n");
8633 		return -ENOMEM;
8634 	}
8635 
8636 	aconnector->i2c = i2c;
8637 	res = i2c_add_adapter(&i2c->base);
8638 
8639 	if (res) {
8640 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8641 		goto out_free;
8642 	}
8643 
8644 	connector_type = to_drm_connector_type(link->connector_signal);
8645 
8646 	res = drm_connector_init_with_ddc(
8647 			dm->ddev,
8648 			&aconnector->base,
8649 			&amdgpu_dm_connector_funcs,
8650 			connector_type,
8651 			&i2c->base);
8652 
8653 	if (res) {
8654 		DRM_ERROR("connector_init failed\n");
8655 		aconnector->connector_id = -1;
8656 		goto out_free;
8657 	}
8658 
8659 	drm_connector_helper_add(
8660 			&aconnector->base,
8661 			&amdgpu_dm_connector_helper_funcs);
8662 
8663 	amdgpu_dm_connector_init_helper(
8664 		dm,
8665 		aconnector,
8666 		connector_type,
8667 		link,
8668 		link_index);
8669 
8670 	drm_connector_attach_encoder(
8671 		&aconnector->base, &aencoder->base);
8672 
8673 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8674 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8675 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8676 
8677 out_free:
8678 	if (res) {
8679 		kfree(i2c);
8680 		aconnector->i2c = NULL;
8681 	}
8682 	return res;
8683 }
8684 
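/*
 * Return a bitmask with one bit set per CRTC (capped at six), used as the
 * encoder's possible_crtcs so that any CRTC can drive the encoder.
 */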
8685 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8686 {
8687 	switch (adev->mode_info.num_crtc) {
8688 	case 1:
8689 		return 0x1;
8690 	case 2:
8691 		return 0x3;
8692 	case 3:
8693 		return 0x7;
8694 	case 4:
8695 		return 0xf;
8696 	case 5:
8697 		return 0x1f;
8698 	case 6:
8699 	default:
8700 		return 0x3f;
8701 	}
8702 }
8703 
8704 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8705 				  struct amdgpu_encoder *aencoder,
8706 				  uint32_t link_index)
8707 {
8708 	struct amdgpu_device *adev = drm_to_adev(dev);
8709 
8710 	int res = drm_encoder_init(dev,
8711 				   &aencoder->base,
8712 				   &amdgpu_dm_encoder_funcs,
8713 				   DRM_MODE_ENCODER_TMDS,
8714 				   NULL);
8715 
8716 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8717 
8718 	if (!res)
8719 		aencoder->encoder_id = link_index;
8720 	else
8721 		aencoder->encoder_id = -1;
8722 
8723 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8724 
8725 	return res;
8726 }
8727 
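/*
 * Enable or disable the per-CRTC interrupts (pageflip and, with secure
 * display support, vline0) together with the DRM vblank machinery for @acrtc.
 */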
8728 static void manage_dm_interrupts(struct amdgpu_device *adev,
8729 				 struct amdgpu_crtc *acrtc,
8730 				 bool enable)
8731 {
8732 	/*
8733 	 * We have no guarantee that the frontend index maps to the same
8734 	 * backend index - some even map to more than one.
8735 	 *
8736 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8737 	 */
8738 	int irq_type =
8739 		amdgpu_display_crtc_idx_to_irq_type(
8740 			adev,
8741 			acrtc->crtc_id);
8742 
8743 	if (enable) {
8744 		drm_crtc_vblank_on(&acrtc->base);
8745 		amdgpu_irq_get(
8746 			adev,
8747 			&adev->pageflip_irq,
8748 			irq_type);
8749 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8750 		amdgpu_irq_get(
8751 			adev,
8752 			&adev->vline0_irq,
8753 			irq_type);
8754 #endif
8755 	} else {
8756 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8757 		amdgpu_irq_put(
8758 			adev,
8759 			&adev->vline0_irq,
8760 			irq_type);
8761 #endif
8762 		amdgpu_irq_put(
8763 			adev,
8764 			&adev->pageflip_irq,
8765 			irq_type);
8766 		drm_crtc_vblank_off(&acrtc->base);
8767 	}
8768 }
8769 
8770 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8771 				      struct amdgpu_crtc *acrtc)
8772 {
8773 	int irq_type =
8774 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8775 
	/*
	 * This reads the current state for the IRQ and forcibly reapplies
	 * the setting to hardware.
	 */
8780 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8781 }
8782 
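/*
 * Return true if the scaling mode or underscan settings changed between the
 * old and new connector state.
 */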
8783 static bool
8784 is_scaling_state_different(const struct dm_connector_state *dm_state,
8785 			   const struct dm_connector_state *old_dm_state)
8786 {
8787 	if (dm_state->scaling != old_dm_state->scaling)
8788 		return true;
8789 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8790 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8791 			return true;
8792 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8793 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8794 			return true;
8795 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8796 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8797 		return true;
8798 	return false;
8799 }
8800 
8801 #ifdef CONFIG_DRM_AMD_DC_HDCP
8802 static bool is_content_protection_different(struct drm_connector_state *state,
8803 					    const struct drm_connector_state *old_state,
8804 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8805 {
8806 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8807 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8808 
8809 	/* Handle: Type0/1 change */
8810 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8811 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8812 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8813 		return true;
8814 	}
8815 
	/* CP is being re-enabled, ignore this
8817 	 *
8818 	 * Handles:	ENABLED -> DESIRED
8819 	 */
8820 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8821 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8822 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8823 		return false;
8824 	}
8825 
8826 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8827 	 *
8828 	 * Handles:	UNDESIRED -> ENABLED
8829 	 */
8830 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8831 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8832 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8833 
8834 	/* Stream removed and re-enabled
8835 	 *
8836 	 * Can sometimes overlap with the HPD case,
8837 	 * thus set update_hdcp to false to avoid
8838 	 * setting HDCP multiple times.
8839 	 *
8840 	 * Handles:	DESIRED -> DESIRED (Special case)
8841 	 */
8842 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8843 		state->crtc && state->crtc->enabled &&
8844 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8845 		dm_con_state->update_hdcp = false;
8846 		return true;
8847 	}
8848 
8849 	/* Hot-plug, headless s3, dpms
8850 	 *
8851 	 * Only start HDCP if the display is connected/enabled.
8852 	 * update_hdcp flag will be set to false until the next
8853 	 * HPD comes in.
8854 	 *
8855 	 * Handles:	DESIRED -> DESIRED (Special case)
8856 	 */
8857 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8858 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8859 		dm_con_state->update_hdcp = false;
8860 		return true;
8861 	}
8862 
8863 	/*
8864 	 * Handles:	UNDESIRED -> UNDESIRED
8865 	 *		DESIRED -> DESIRED
8866 	 *		ENABLED -> ENABLED
8867 	 */
8868 	if (old_state->content_protection == state->content_protection)
8869 		return false;
8870 
8871 	/*
8872 	 * Handles:	UNDESIRED -> DESIRED
8873 	 *		DESIRED -> UNDESIRED
8874 	 *		ENABLED -> UNDESIRED
8875 	 */
8876 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8877 		return true;
8878 
8879 	/*
8880 	 * Handles:	DESIRED -> ENABLED
8881 	 */
8882 	return false;
8883 }
8884 
8885 #endif
8886 static void remove_stream(struct amdgpu_device *adev,
8887 			  struct amdgpu_crtc *acrtc,
8888 			  struct dc_stream_state *stream)
8889 {
	/* This is the update-mode path: the CRTC no longer drives a stream. */
8891 
8892 	acrtc->otg_inst = -1;
8893 	acrtc->enabled = false;
8894 }
8895 
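/*
 * Compute the DC cursor position for @plane on @crtc. Negative coordinates
 * are clamped to zero and folded into the hotspot so the cursor can be moved
 * partially off the top/left edge of the screen.
 */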
8896 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8897 			       struct dc_cursor_position *position)
8898 {
8899 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8900 	int x, y;
8901 	int xorigin = 0, yorigin = 0;
8902 
8903 	if (!crtc || !plane->state->fb)
8904 		return 0;
8905 
8906 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8907 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8908 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8909 			  __func__,
8910 			  plane->state->crtc_w,
8911 			  plane->state->crtc_h);
8912 		return -EINVAL;
8913 	}
8914 
8915 	x = plane->state->crtc_x;
8916 	y = plane->state->crtc_y;
8917 
8918 	if (x <= -amdgpu_crtc->max_cursor_width ||
8919 	    y <= -amdgpu_crtc->max_cursor_height)
8920 		return 0;
8921 
8922 	if (x < 0) {
8923 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8924 		x = 0;
8925 	}
8926 	if (y < 0) {
8927 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8928 		y = 0;
8929 	}
8930 	position->enable = true;
8931 	position->translate_by_source = true;
8932 	position->x = x;
8933 	position->y = y;
8934 	position->x_hotspot = xorigin;
8935 	position->y_hotspot = yorigin;
8936 
8937 	return 0;
8938 }
8939 
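/*
 * Program the DC cursor attributes and position for a cursor plane update,
 * or disable the cursor when it has no framebuffer or moves fully off-screen.
 */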
8940 static void handle_cursor_update(struct drm_plane *plane,
8941 				 struct drm_plane_state *old_plane_state)
8942 {
8943 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8944 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8945 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8946 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8947 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8948 	uint64_t address = afb ? afb->address : 0;
8949 	struct dc_cursor_position position = {0};
8950 	struct dc_cursor_attributes attributes;
8951 	int ret;
8952 
8953 	if (!plane->state->fb && !old_plane_state->fb)
8954 		return;
8955 
8956 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8957 		      __func__,
8958 		      amdgpu_crtc->crtc_id,
8959 		      plane->state->crtc_w,
8960 		      plane->state->crtc_h);
8961 
8962 	ret = get_cursor_position(plane, crtc, &position);
8963 	if (ret)
8964 		return;
8965 
8966 	if (!position.enable) {
8967 		/* turn off cursor */
8968 		if (crtc_state && crtc_state->stream) {
8969 			mutex_lock(&adev->dm.dc_lock);
8970 			dc_stream_set_cursor_position(crtc_state->stream,
8971 						      &position);
8972 			mutex_unlock(&adev->dm.dc_lock);
8973 		}
8974 		return;
8975 	}
8976 
8977 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8978 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8979 
8980 	memset(&attributes, 0, sizeof(attributes));
8981 	attributes.address.high_part = upper_32_bits(address);
8982 	attributes.address.low_part  = lower_32_bits(address);
8983 	attributes.width             = plane->state->crtc_w;
8984 	attributes.height            = plane->state->crtc_h;
8985 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8986 	attributes.rotation_angle    = 0;
8987 	attributes.attribute_flags.value = 0;
8988 
8989 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8990 
8991 	if (crtc_state->stream) {
8992 		mutex_lock(&adev->dm.dc_lock);
8993 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8994 							 &attributes))
8995 			DRM_ERROR("DC failed to set cursor attributes\n");
8996 
8997 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8998 						   &position))
8999 			DRM_ERROR("DC failed to set cursor position\n");
9000 		mutex_unlock(&adev->dm.dc_lock);
9001 	}
9002 }
9003 
9004 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9005 {
9006 
9007 	assert_spin_locked(&acrtc->base.dev->event_lock);
9008 	WARN_ON(acrtc->event);
9009 
9010 	acrtc->event = acrtc->base.state->event;
9011 
9012 	/* Set the flip status */
9013 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9014 
9015 	/* Mark this event as consumed */
9016 	acrtc->base.state->event = NULL;
9017 
9018 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9019 		     acrtc->crtc_id);
9020 }
9021 
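/*
 * Update the VRR state for a flip on @new_stream: run the freesync module
 * pre-flip handling, rebuild the VRR infopacket, and push the adjusted
 * timing into the stream and the CRTC's IRQ parameters.
 */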
9022 static void update_freesync_state_on_stream(
9023 	struct amdgpu_display_manager *dm,
9024 	struct dm_crtc_state *new_crtc_state,
9025 	struct dc_stream_state *new_stream,
9026 	struct dc_plane_state *surface,
9027 	u32 flip_timestamp_in_us)
9028 {
9029 	struct mod_vrr_params vrr_params;
9030 	struct dc_info_packet vrr_infopacket = {0};
9031 	struct amdgpu_device *adev = dm->adev;
9032 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9033 	unsigned long flags;
9034 	bool pack_sdp_v1_3 = false;
9035 
9036 	if (!new_stream)
9037 		return;
9038 
9039 	/*
9040 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9041 	 * For now it's sufficient to just guard against these conditions.
9042 	 */
9043 
9044 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9045 		return;
9046 
9047 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
9049 
9050 	if (surface) {
9051 		mod_freesync_handle_preflip(
9052 			dm->freesync_module,
9053 			surface,
9054 			new_stream,
9055 			flip_timestamp_in_us,
9056 			&vrr_params);
9057 
9058 		if (adev->family < AMDGPU_FAMILY_AI &&
9059 		    amdgpu_dm_vrr_active(new_crtc_state)) {
9060 			mod_freesync_handle_v_update(dm->freesync_module,
9061 						     new_stream, &vrr_params);
9062 
9063 			/* Need to call this before the frame ends. */
9064 			dc_stream_adjust_vmin_vmax(dm->dc,
9065 						   new_crtc_state->stream,
9066 						   &vrr_params.adjust);
9067 		}
9068 	}
9069 
9070 	mod_freesync_build_vrr_infopacket(
9071 		dm->freesync_module,
9072 		new_stream,
9073 		&vrr_params,
9074 		PACKET_TYPE_VRR,
9075 		TRANSFER_FUNC_UNKNOWN,
9076 		&vrr_infopacket,
9077 		pack_sdp_v1_3);
9078 
9079 	new_crtc_state->freesync_timing_changed |=
9080 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9081 			&vrr_params.adjust,
9082 			sizeof(vrr_params.adjust)) != 0);
9083 
9084 	new_crtc_state->freesync_vrr_info_changed |=
9085 		(memcmp(&new_crtc_state->vrr_infopacket,
9086 			&vrr_infopacket,
9087 			sizeof(vrr_infopacket)) != 0);
9088 
9089 	acrtc->dm_irq_params.vrr_params = vrr_params;
9090 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9091 
9092 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9093 	new_stream->vrr_infopacket = vrr_infopacket;
9094 
9095 	if (new_crtc_state->freesync_vrr_info_changed)
9096 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9097 			      new_crtc_state->base.crtc->base.id,
9098 			      (int)new_crtc_state->base.vrr_enabled,
9099 			      (int)vrr_params.state);
9100 
9101 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9102 }
9103 
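/*
 * Recompute the VRR parameters from the new CRTC state's freesync config and
 * copy them, along with the active plane count, into the CRTC's IRQ
 * parameters so the interrupt handlers see a consistent snapshot.
 */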
9104 static void update_stream_irq_parameters(
9105 	struct amdgpu_display_manager *dm,
9106 	struct dm_crtc_state *new_crtc_state)
9107 {
9108 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9109 	struct mod_vrr_params vrr_params;
9110 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9111 	struct amdgpu_device *adev = dm->adev;
9112 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9113 	unsigned long flags;
9114 
9115 	if (!new_stream)
9116 		return;
9117 
9118 	/*
9119 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9120 	 * For now it's sufficient to just guard against these conditions.
9121 	 */
9122 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9123 		return;
9124 
9125 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9126 	vrr_params = acrtc->dm_irq_params.vrr_params;
9127 
9128 	if (new_crtc_state->vrr_supported &&
9129 	    config.min_refresh_in_uhz &&
9130 	    config.max_refresh_in_uhz) {
9131 		/*
9132 		 * if freesync compatible mode was set, config.state will be set
9133 		 * in atomic check
9134 		 */
9135 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9136 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9137 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9138 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9139 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9140 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9141 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9142 		} else {
9143 			config.state = new_crtc_state->base.vrr_enabled ?
9144 						     VRR_STATE_ACTIVE_VARIABLE :
9145 						     VRR_STATE_INACTIVE;
9146 		}
9147 	} else {
9148 		config.state = VRR_STATE_UNSUPPORTED;
9149 	}
9150 
9151 	mod_freesync_build_vrr_params(dm->freesync_module,
9152 				      new_stream,
9153 				      &config, &vrr_params);
9154 
9155 	new_crtc_state->freesync_timing_changed |=
9156 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9157 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9158 
9159 	new_crtc_state->freesync_config = config;
9160 	/* Copy state for access from DM IRQ handler */
9161 	acrtc->dm_irq_params.freesync_config = config;
9162 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9163 	acrtc->dm_irq_params.vrr_params = vrr_params;
9164 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9165 }
9166 
9167 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9168 					    struct dm_crtc_state *new_state)
9169 {
9170 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9171 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9172 
9173 	if (!old_vrr_active && new_vrr_active) {
9174 		/* Transition VRR inactive -> active:
9175 		 * While VRR is active, we must not disable vblank irq, as a
9176 		 * reenable after disable would compute bogus vblank/pflip
9177 		 * timestamps if it likely happened inside display front-porch.
9178 		 *
9179 		 * We also need vupdate irq for the actual core vblank handling
9180 		 * at end of vblank.
9181 		 */
9182 		dm_set_vupdate_irq(new_state->base.crtc, true);
9183 		drm_crtc_vblank_get(new_state->base.crtc);
9184 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9185 				 __func__, new_state->base.crtc->base.id);
9186 	} else if (old_vrr_active && !new_vrr_active) {
9187 		/* Transition VRR active -> inactive:
9188 		 * Allow vblank irq disable again for fixed refresh rate.
9189 		 */
9190 		dm_set_vupdate_irq(new_state->base.crtc, false);
9191 		drm_crtc_vblank_put(new_state->base.crtc);
9192 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9193 				 __func__, new_state->base.crtc->base.id);
9194 	}
9195 }
9196 
9197 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9198 {
9199 	struct drm_plane *plane;
9200 	struct drm_plane_state *old_plane_state;
9201 	int i;
9202 
9203 	/*
9204 	 * TODO: Make this per-stream so we don't issue redundant updates for
9205 	 * commits with multiple streams.
9206 	 */
9207 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9208 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9209 			handle_cursor_update(plane, old_plane_state);
9210 }
9211 
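/*
 * Commit all plane updates for @pcrtc: build a dc_surface_update bundle,
 * wait for fences on flipped buffers, throttle flips against the target
 * vblank, and hand everything to DC in a single dc_commit_updates_for_stream()
 * call, managing PSR state around the update.
 */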
9212 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9213 				    struct dc_state *dc_state,
9214 				    struct drm_device *dev,
9215 				    struct amdgpu_display_manager *dm,
9216 				    struct drm_crtc *pcrtc,
9217 				    bool wait_for_vblank)
9218 {
9219 	uint32_t i;
9220 	uint64_t timestamp_ns;
9221 	struct drm_plane *plane;
9222 	struct drm_plane_state *old_plane_state, *new_plane_state;
9223 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9224 	struct drm_crtc_state *new_pcrtc_state =
9225 			drm_atomic_get_new_crtc_state(state, pcrtc);
9226 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9227 	struct dm_crtc_state *dm_old_crtc_state =
9228 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9229 	int planes_count = 0, vpos, hpos;
9230 	long r;
9231 	unsigned long flags;
9232 	struct amdgpu_bo *abo;
9233 	uint32_t target_vblank, last_flip_vblank;
9234 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9235 	bool pflip_present = false;
9236 	struct {
9237 		struct dc_surface_update surface_updates[MAX_SURFACES];
9238 		struct dc_plane_info plane_infos[MAX_SURFACES];
9239 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9240 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9241 		struct dc_stream_update stream_update;
9242 	} *bundle;
9243 
9244 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9245 
9246 	if (!bundle) {
9247 		dm_error("Failed to allocate update bundle\n");
9248 		goto cleanup;
9249 	}
9250 
9251 	/*
9252 	 * Disable the cursor first if we're disabling all the planes.
9253 	 * It'll remain on the screen after the planes are re-enabled
9254 	 * if we don't.
9255 	 */
9256 	if (acrtc_state->active_planes == 0)
9257 		amdgpu_dm_commit_cursors(state);
9258 
9259 	/* update planes when needed */
9260 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9261 		struct drm_crtc *crtc = new_plane_state->crtc;
9262 		struct drm_crtc_state *new_crtc_state;
9263 		struct drm_framebuffer *fb = new_plane_state->fb;
9264 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9265 		bool plane_needs_flip;
9266 		struct dc_plane_state *dc_plane;
9267 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9268 
9269 		/* Cursor plane is handled after stream updates */
9270 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9271 			continue;
9272 
9273 		if (!fb || !crtc || pcrtc != crtc)
9274 			continue;
9275 
9276 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9277 		if (!new_crtc_state->active)
9278 			continue;
9279 
9280 		dc_plane = dm_new_plane_state->dc_state;
9281 
9282 		bundle->surface_updates[planes_count].surface = dc_plane;
9283 		if (new_pcrtc_state->color_mgmt_changed) {
9284 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9285 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9286 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9287 		}
9288 
9289 		fill_dc_scaling_info(dm->adev, new_plane_state,
9290 				     &bundle->scaling_infos[planes_count]);
9291 
9292 		bundle->surface_updates[planes_count].scaling_info =
9293 			&bundle->scaling_infos[planes_count];
9294 
9295 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9296 
9297 		pflip_present = pflip_present || plane_needs_flip;
9298 
9299 		if (!plane_needs_flip) {
9300 			planes_count += 1;
9301 			continue;
9302 		}
9303 
9304 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9305 
9306 		/*
9307 		 * Wait for all fences on this FB. Do limited wait to avoid
9308 		 * deadlock during GPU reset when this fence will not signal
9309 		 * but we hold reservation lock for the BO.
9310 		 */
9311 		r = dma_resv_wait_timeout(abo->tbo.base.resv,
9312 					  DMA_RESV_USAGE_WRITE, false,
9313 					  msecs_to_jiffies(5000));
9314 		if (unlikely(r <= 0))
9315 			DRM_ERROR("Waiting for fences timed out!");
9316 
9317 		fill_dc_plane_info_and_addr(
9318 			dm->adev, new_plane_state,
9319 			afb->tiling_flags,
9320 			&bundle->plane_infos[planes_count],
9321 			&bundle->flip_addrs[planes_count].address,
9322 			afb->tmz_surface, false);
9323 
9324 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9325 				 new_plane_state->plane->index,
9326 				 bundle->plane_infos[planes_count].dcc.enable);
9327 
9328 		bundle->surface_updates[planes_count].plane_info =
9329 			&bundle->plane_infos[planes_count];
9330 
9331 		/*
9332 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
9334 		 */
9335 		bundle->flip_addrs[planes_count].flip_immediate =
9336 			crtc->state->async_flip &&
9337 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9338 
9339 		timestamp_ns = ktime_get_ns();
9340 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9341 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9342 		bundle->surface_updates[planes_count].surface = dc_plane;
9343 
9344 		if (!bundle->surface_updates[planes_count].surface) {
9345 			DRM_ERROR("No surface for CRTC: id=%d\n",
9346 					acrtc_attach->crtc_id);
9347 			continue;
9348 		}
9349 
9350 		if (plane == pcrtc->primary)
9351 			update_freesync_state_on_stream(
9352 				dm,
9353 				acrtc_state,
9354 				acrtc_state->stream,
9355 				dc_plane,
9356 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9357 
9358 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9359 				 __func__,
9360 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9361 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9362 
9363 		planes_count += 1;
9364 
9365 	}
9366 
9367 	if (pflip_present) {
9368 		if (!vrr_active) {
9369 			/* Use old throttling in non-vrr fixed refresh rate mode
9370 			 * to keep flip scheduling based on target vblank counts
9371 			 * working in a backwards compatible way, e.g., for
9372 			 * clients using the GLX_OML_sync_control extension or
9373 			 * DRI3/Present extension with defined target_msc.
9374 			 */
9375 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
9378 			/* For variable refresh rate mode only:
9379 			 * Get vblank of last completed flip to avoid > 1 vrr
9380 			 * flips per video frame by use of throttling, but allow
9381 			 * flip programming anywhere in the possibly large
9382 			 * variable vrr vblank interval for fine-grained flip
9383 			 * timing control and more opportunity to avoid stutter
9384 			 * on late submission of flips.
9385 			 */
9386 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9387 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9388 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9389 		}
9390 
9391 		target_vblank = last_flip_vblank + wait_for_vblank;
9392 
9393 		/*
9394 		 * Wait until we're out of the vertical blank period before the one
9395 		 * targeted by the flip
9396 		 */
9397 		while ((acrtc_attach->enabled &&
9398 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9399 							    0, &vpos, &hpos, NULL,
9400 							    NULL, &pcrtc->hwmode)
9401 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9402 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9403 			(int)(target_vblank -
9404 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9405 			usleep_range(1000, 1100);
9406 		}
9407 
9408 		/**
9409 		 * Prepare the flip event for the pageflip interrupt to handle.
9410 		 *
9411 		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
9413 		 * from 0 -> n planes we have to skip a hardware generated event
9414 		 * and rely on sending it from software.
9415 		 */
9416 		if (acrtc_attach->base.state->event &&
9417 		    acrtc_state->active_planes > 0 &&
9418 		    !acrtc_state->force_dpms_off) {
9419 			drm_crtc_vblank_get(pcrtc);
9420 
9421 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9422 
9423 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9424 			prepare_flip_isr(acrtc_attach);
9425 
9426 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9427 		}
9428 
9429 		if (acrtc_state->stream) {
9430 			if (acrtc_state->freesync_vrr_info_changed)
9431 				bundle->stream_update.vrr_infopacket =
9432 					&acrtc_state->stream->vrr_infopacket;
9433 		}
9434 	}
9435 
9436 	/* Update the planes if changed or disable if we don't have any. */
9437 	if ((planes_count || acrtc_state->active_planes == 0) &&
9438 		acrtc_state->stream) {
9439 		/*
9440 		 * If PSR or idle optimizations are enabled then flush out
9441 		 * any pending work before hardware programming.
9442 		 */
9443 		if (dm->vblank_control_workqueue)
9444 			flush_workqueue(dm->vblank_control_workqueue);
9445 
9446 		bundle->stream_update.stream = acrtc_state->stream;
9447 		if (new_pcrtc_state->mode_changed) {
9448 			bundle->stream_update.src = acrtc_state->stream->src;
9449 			bundle->stream_update.dst = acrtc_state->stream->dst;
9450 		}
9451 
9452 		if (new_pcrtc_state->color_mgmt_changed) {
9453 			/*
9454 			 * TODO: This isn't fully correct since we've actually
9455 			 * already modified the stream in place.
9456 			 */
9457 			bundle->stream_update.gamut_remap =
9458 				&acrtc_state->stream->gamut_remap_matrix;
9459 			bundle->stream_update.output_csc_transform =
9460 				&acrtc_state->stream->csc_color_matrix;
9461 			bundle->stream_update.out_transfer_func =
9462 				acrtc_state->stream->out_transfer_func;
9463 		}
9464 
9465 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9466 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9467 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9468 
9469 		/*
9470 		 * If FreeSync state on the stream has changed then we need to
9471 		 * re-adjust the min/max bounds now that DC doesn't handle this
9472 		 * as part of commit.
9473 		 */
9474 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9475 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9476 			dc_stream_adjust_vmin_vmax(
9477 				dm->dc, acrtc_state->stream,
9478 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9479 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9480 		}
9481 		mutex_lock(&dm->dc_lock);
9482 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9483 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9484 			amdgpu_dm_psr_disable(acrtc_state->stream);
9485 
9486 		dc_commit_updates_for_stream(dm->dc,
9487 						     bundle->surface_updates,
9488 						     planes_count,
9489 						     acrtc_state->stream,
9490 						     &bundle->stream_update,
9491 						     dc_state);
9492 
9493 		/**
9494 		 * Enable or disable the interrupts on the backend.
9495 		 *
9496 		 * Most pipes are put into power gating when unused.
9497 		 *
9498 		 * When power gating is enabled on a pipe we lose the
9499 		 * interrupt enablement state when power gating is disabled.
9500 		 *
9501 		 * So we need to update the IRQ control state in hardware
9502 		 * whenever the pipe turns on (since it could be previously
9503 		 * power gated) or off (since some pipes can't be power gated
9504 		 * on some ASICs).
9505 		 */
9506 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9507 			dm_update_pflip_irq_state(drm_to_adev(dev),
9508 						  acrtc_attach);
9509 
9510 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9511 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9512 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9513 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9514 
9515 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9516 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9517 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9518 			struct amdgpu_dm_connector *aconn =
9519 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9520 
9521 			if (aconn->psr_skip_count > 0)
9522 				aconn->psr_skip_count--;
9523 
9524 			/* Allow PSR when skip count is 0. */
9525 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9526 		} else {
9527 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9528 		}
9529 
9530 		mutex_unlock(&dm->dc_lock);
9531 	}
9532 
9533 	/*
9534 	 * Update cursor state *after* programming all the planes.
9535 	 * This avoids redundant programming in the case where we're going
9536 	 * to be disabling a single plane - those pipes are being disabled.
9537 	 */
9538 	if (acrtc_state->active_planes)
9539 		amdgpu_dm_commit_cursors(state);
9540 
9541 cleanup:
9542 	kfree(bundle);
9543 }
9544 
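/*
 * Notify the audio component about connectors that lost or gained a CRTC in
 * this commit so the audio endpoint/ELD mapping stays in sync with the
 * display state.
 */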
9545 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9546 				   struct drm_atomic_state *state)
9547 {
9548 	struct amdgpu_device *adev = drm_to_adev(dev);
9549 	struct amdgpu_dm_connector *aconnector;
9550 	struct drm_connector *connector;
9551 	struct drm_connector_state *old_con_state, *new_con_state;
9552 	struct drm_crtc_state *new_crtc_state;
9553 	struct dm_crtc_state *new_dm_crtc_state;
9554 	const struct dc_stream_status *status;
9555 	int i, inst;
9556 
9557 	/* Notify device removals. */
9558 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9559 		if (old_con_state->crtc != new_con_state->crtc) {
9560 			/* CRTC changes require notification. */
9561 			goto notify;
9562 		}
9563 
9564 		if (!new_con_state->crtc)
9565 			continue;
9566 
9567 		new_crtc_state = drm_atomic_get_new_crtc_state(
9568 			state, new_con_state->crtc);
9569 
9570 		if (!new_crtc_state)
9571 			continue;
9572 
9573 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9574 			continue;
9575 
9576 	notify:
9577 		aconnector = to_amdgpu_dm_connector(connector);
9578 
9579 		mutex_lock(&adev->dm.audio_lock);
9580 		inst = aconnector->audio_inst;
9581 		aconnector->audio_inst = -1;
9582 		mutex_unlock(&adev->dm.audio_lock);
9583 
9584 		amdgpu_dm_audio_eld_notify(adev, inst);
9585 	}
9586 
9587 	/* Notify audio device additions. */
9588 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9589 		if (!new_con_state->crtc)
9590 			continue;
9591 
9592 		new_crtc_state = drm_atomic_get_new_crtc_state(
9593 			state, new_con_state->crtc);
9594 
9595 		if (!new_crtc_state)
9596 			continue;
9597 
9598 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9599 			continue;
9600 
9601 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9602 		if (!new_dm_crtc_state->stream)
9603 			continue;
9604 
9605 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9606 		if (!status)
9607 			continue;
9608 
9609 		aconnector = to_amdgpu_dm_connector(connector);
9610 
9611 		mutex_lock(&adev->dm.audio_lock);
9612 		inst = status->audio_inst;
9613 		aconnector->audio_inst = inst;
9614 		mutex_unlock(&adev->dm.audio_lock);
9615 
9616 		amdgpu_dm_audio_eld_notify(adev, inst);
9617 	}
9618 }
9619 
9620 /*
9621  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9622  * @crtc_state: the DRM CRTC state
9623  * @stream_state: the DC stream state.
9624  *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9626  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9627  */
9628 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9629 						struct dc_stream_state *stream_state)
9630 {
9631 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9632 }
9633 
9634 /**
9635  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9636  * @state: The atomic state to commit
9637  *
9638  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
9640  * atomic check should have filtered anything non-kosher.
9641  */
9642 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9643 {
9644 	struct drm_device *dev = state->dev;
9645 	struct amdgpu_device *adev = drm_to_adev(dev);
9646 	struct amdgpu_display_manager *dm = &adev->dm;
9647 	struct dm_atomic_state *dm_state;
9648 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9649 	uint32_t i, j;
9650 	struct drm_crtc *crtc;
9651 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9652 	unsigned long flags;
9653 	bool wait_for_vblank = true;
9654 	struct drm_connector *connector;
9655 	struct drm_connector_state *old_con_state, *new_con_state;
9656 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9657 	int crtc_disable_count = 0;
9658 	bool mode_set_reset_required = false;
9659 
9660 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9661 
9662 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9663 
9664 	dm_state = dm_atomic_get_new_state(state);
9665 	if (dm_state && dm_state->context) {
9666 		dc_state = dm_state->context;
9667 	} else {
9668 		/* No state changes, retain current state. */
9669 		dc_state_temp = dc_create_state(dm->dc);
9670 		ASSERT(dc_state_temp);
9671 		dc_state = dc_state_temp;
9672 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9673 	}
9674 
9675 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9676 				       new_crtc_state, i) {
9677 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9678 
9679 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9680 
9681 		if (old_crtc_state->active &&
9682 		    (!new_crtc_state->active ||
9683 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9684 			manage_dm_interrupts(adev, acrtc, false);
9685 			dc_stream_release(dm_old_crtc_state->stream);
9686 		}
9687 	}
9688 
9689 	drm_atomic_helper_calc_timestamping_constants(state);
9690 
9691 	/* update changed items */
9692 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9693 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9694 
9695 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9696 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9697 
9698 		drm_dbg_state(state->dev,
9699 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9701 			"connectors_changed:%d\n",
9702 			acrtc->crtc_id,
9703 			new_crtc_state->enable,
9704 			new_crtc_state->active,
9705 			new_crtc_state->planes_changed,
9706 			new_crtc_state->mode_changed,
9707 			new_crtc_state->active_changed,
9708 			new_crtc_state->connectors_changed);
9709 
9710 		/* Disable cursor if disabling crtc */
9711 		if (old_crtc_state->active && !new_crtc_state->active) {
9712 			struct dc_cursor_position position;
9713 
9714 			memset(&position, 0, sizeof(position));
9715 			mutex_lock(&dm->dc_lock);
9716 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9717 			mutex_unlock(&dm->dc_lock);
9718 		}
9719 
9720 		/* Copy all transient state flags into dc state */
9721 		if (dm_new_crtc_state->stream) {
9722 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9723 							    dm_new_crtc_state->stream);
9724 		}
9725 
9726 		/* handles headless hotplug case, updating new_state and
9727 		 * aconnector as needed
9728 		 */
9729 
9730 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9731 
9732 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9733 
9734 			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected, so dc_sink is NULL on the
				 * aconnector. We expect a mode reset to come
				 * soon.
				 *
				 * It can also happen when an unplug occurs
				 * while the resume sequence is still
				 * completing.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state stays consistent with sw state.
				 */
9750 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9751 						__func__, acrtc->base.base.id);
9752 				continue;
9753 			}
9754 
9755 			if (dm_old_crtc_state->stream)
9756 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9757 
9758 			pm_runtime_get_noresume(dev->dev);
9759 
9760 			acrtc->enabled = true;
9761 			acrtc->hw_mode = new_crtc_state->mode;
9762 			crtc->hwmode = new_crtc_state->mode;
9763 			mode_set_reset_required = true;
9764 		} else if (modereset_required(new_crtc_state)) {
9765 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9766 			/* i.e. reset mode */
9767 			if (dm_old_crtc_state->stream)
9768 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9769 
9770 			mode_set_reset_required = true;
9771 		}
9772 	} /* for_each_crtc_in_state() */
9773 
9774 	if (dc_state) {
		/* If there was a mode set or reset, disable eDP PSR. */
9776 		if (mode_set_reset_required) {
9777 			if (dm->vblank_control_workqueue)
9778 				flush_workqueue(dm->vblank_control_workqueue);
9779 
9780 			amdgpu_dm_psr_disable_all(dm);
9781 		}
9782 
9783 		dm_enable_per_frame_crtc_master_sync(dc_state);
9784 		mutex_lock(&dm->dc_lock);
9785 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9786 
9787 		/* Allow idle optimization when vblank count is 0 for display off */
9788 		if (dm->active_vblank_irq_count == 0)
9789 			dc_allow_idle_optimizations(dm->dc, true);
9790 		mutex_unlock(&dm->dc_lock);
9791 	}
9792 
9793 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9794 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9795 
9796 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9797 
9798 		if (dm_new_crtc_state->stream != NULL) {
9799 			const struct dc_stream_status *status =
9800 					dc_stream_get_status(dm_new_crtc_state->stream);
9801 
9802 			if (!status)
9803 				status = dc_stream_get_status_from_state(dc_state,
9804 									 dm_new_crtc_state->stream);
9805 			if (!status)
9806 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9807 			else
9808 				acrtc->otg_inst = status->primary_otg_inst;
9809 		}
9810 	}
9811 #ifdef CONFIG_DRM_AMD_DC_HDCP
9812 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9813 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9814 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9815 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9816 
9817 		new_crtc_state = NULL;
9818 
9819 		if (acrtc)
9820 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9821 
9822 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9823 
9824 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9825 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9826 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9827 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9828 			dm_new_con_state->update_hdcp = true;
9829 			continue;
9830 		}
9831 
9832 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9833 			hdcp_update_display(
9834 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9835 				new_con_state->hdcp_content_type,
9836 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9837 	}
9838 #endif
9839 
9840 	/* Handle connector state changes */
9841 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9842 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9843 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9844 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9845 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9846 		struct dc_stream_update stream_update;
9847 		struct dc_info_packet hdr_packet;
9848 		struct dc_stream_status *status = NULL;
9849 		bool abm_changed, hdr_changed, scaling_changed;
9850 
9851 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9852 		memset(&stream_update, 0, sizeof(stream_update));
9853 
9854 		if (acrtc) {
9855 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9856 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9857 		}
9858 
9859 		/* Skip any modesets/resets */
9860 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9861 			continue;
9862 
9863 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9864 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9865 
9866 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9867 							     dm_old_con_state);
9868 
9869 		abm_changed = dm_new_crtc_state->abm_level !=
9870 			      dm_old_crtc_state->abm_level;
9871 
9872 		hdr_changed =
9873 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9874 
9875 		if (!scaling_changed && !abm_changed && !hdr_changed)
9876 			continue;
9877 
9878 		stream_update.stream = dm_new_crtc_state->stream;
9879 		if (scaling_changed) {
9880 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9881 					dm_new_con_state, dm_new_crtc_state->stream);
9882 
9883 			stream_update.src = dm_new_crtc_state->stream->src;
9884 			stream_update.dst = dm_new_crtc_state->stream->dst;
9885 		}
9886 
9887 		if (abm_changed) {
9888 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9889 
9890 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9891 		}
9892 
9893 		if (hdr_changed) {
9894 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9895 			stream_update.hdr_static_metadata = &hdr_packet;
9896 		}
9897 
9898 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9899 
9900 		if (WARN_ON(!status))
9901 			continue;
9902 
9903 		WARN_ON(!status->plane_count);
9904 
9905 		/*
9906 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9907 		 * Here we create an empty update on each plane.
9908 		 * To fix this, DC should permit updating only stream properties.
9909 		 */
9910 		for (j = 0; j < status->plane_count; j++)
9911 			dummy_updates[j].surface = status->plane_states[0];
9912 
9913 
9914 		mutex_lock(&dm->dc_lock);
9915 		dc_commit_updates_for_stream(dm->dc,
9916 						     dummy_updates,
9917 						     status->plane_count,
9918 						     dm_new_crtc_state->stream,
9919 						     &stream_update,
9920 						     dc_state);
9921 		mutex_unlock(&dm->dc_lock);
9922 	}
9923 
9924 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9925 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9926 				      new_crtc_state, i) {
9927 		if (old_crtc_state->active && !new_crtc_state->active)
9928 			crtc_disable_count++;
9929 
9930 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9931 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9932 
9933 		/* For freesync config update on crtc state and params for irq */
9934 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9935 
9936 		/* Handle vrr on->off / off->on transitions */
9937 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9938 						dm_new_crtc_state);
9939 	}
9940 
9941 	/**
9942 	 * Enable interrupts for CRTCs that are newly enabled or went through
9943 	 * a modeset. It was intentionally deferred until after the front end
9944 	 * state was modified to wait until the OTG was on and so the IRQ
9945 	 * handlers didn't access stale or invalid state.
9946 	 */
9947 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9948 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9949 #ifdef CONFIG_DEBUG_FS
9950 		bool configure_crc = false;
9951 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9952 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9953 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9954 #endif
9955 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9956 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9957 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9958 #endif
9959 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9960 
9961 		if (new_crtc_state->active &&
9962 		    (!old_crtc_state->active ||
9963 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9964 			dc_stream_retain(dm_new_crtc_state->stream);
9965 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9966 			manage_dm_interrupts(adev, acrtc, true);
9967 
9968 #ifdef CONFIG_DEBUG_FS
9969 			/**
9970 			 * Frontend may have changed so reapply the CRC capture
9971 			 * settings for the stream.
9972 			 */
9973 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9974 
9975 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9976 				configure_crc = true;
9977 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9978 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9979 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9980 					acrtc->dm_irq_params.crc_window.update_win = true;
9981 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9982 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9983 					crc_rd_wrk->crtc = crtc;
9984 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9985 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9986 				}
9987 #endif
9988 			}
9989 
9990 			if (configure_crc)
9991 				if (amdgpu_dm_crtc_configure_crc_source(
9992 					crtc, dm_new_crtc_state, cur_crc_src))
9993 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9994 #endif
9995 		}
9996 	}
9997 
9998 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9999 		if (new_crtc_state->async_flip)
10000 			wait_for_vblank = false;
10001 
10002 	/* update planes when needed per crtc*/
10003 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10004 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10005 
10006 		if (dm_new_crtc_state->stream)
10007 			amdgpu_dm_commit_planes(state, dc_state, dev,
10008 						dm, crtc, wait_for_vblank);
10009 	}
10010 
10011 	/* Update audio instances for each connector. */
10012 	amdgpu_dm_commit_audio(dev, state);
10013 
10014 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
10015 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
10016 	/* restore the backlight level */
10017 	for (i = 0; i < dm->num_of_edps; i++) {
10018 		if (dm->backlight_dev[i] &&
10019 		    (dm->actual_brightness[i] != dm->brightness[i]))
10020 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10021 	}
10022 #endif
10023 	/*
10024 	 * send vblank event on all events not handled in flip and
10025 	 * mark consumed event for drm_atomic_helper_commit_hw_done
10026 	 */
10027 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10028 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10029 
10030 		if (new_crtc_state->event)
10031 			drm_send_event_locked(dev, &new_crtc_state->event->base);
10032 
10033 		new_crtc_state->event = NULL;
10034 	}
10035 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10036 
10037 	/* Signal HW programming completion */
10038 	drm_atomic_helper_commit_hw_done(state);
10039 
10040 	if (wait_for_vblank)
10041 		drm_atomic_helper_wait_for_flip_done(dev, state);
10042 
10043 	drm_atomic_helper_cleanup_planes(dev, state);
10044 
10045 	/* return the stolen vga memory back to VRAM */
10046 	if (!adev->mman.keep_stolen_vga_memory)
10047 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10048 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10049 
10050 	/*
10051 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10052 	 * so we can put the GPU into runtime suspend if we're not driving any
10053 	 * displays anymore
10054 	 */
10055 	for (i = 0; i < crtc_disable_count; i++)
10056 		pm_runtime_put_autosuspend(dev->dev);
10057 	pm_runtime_mark_last_busy(dev->dev);
10058 
10059 	if (dc_state_temp)
10060 		dc_release_state(dc_state_temp);
10061 }
10062 
10063 
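/*
 * Build a minimal atomic state containing the connector, its CRTC and the
 * CRTC's primary plane, mark the CRTC as mode_changed and commit it, forcing
 * a restore of the previous display configuration on that connector.
 */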
10064 static int dm_force_atomic_commit(struct drm_connector *connector)
10065 {
10066 	int ret = 0;
10067 	struct drm_device *ddev = connector->dev;
10068 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10069 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10070 	struct drm_plane *plane = disconnected_acrtc->base.primary;
10071 	struct drm_connector_state *conn_state;
10072 	struct drm_crtc_state *crtc_state;
10073 	struct drm_plane_state *plane_state;
10074 
10075 	if (!state)
10076 		return -ENOMEM;
10077 
10078 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10079 
	/* Construct an atomic state to restore the previous display setting */

	/* Attach the connector to drm_atomic_state */
10085 	conn_state = drm_atomic_get_connector_state(state, connector);
10086 
10087 	ret = PTR_ERR_OR_ZERO(conn_state);
10088 	if (ret)
10089 		goto out;
10090 
	/* Attach the CRTC to drm_atomic_state */
10092 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10093 
10094 	ret = PTR_ERR_OR_ZERO(crtc_state);
10095 	if (ret)
10096 		goto out;
10097 
10098 	/* force a restore */
10099 	crtc_state->mode_changed = true;
10100 
10101 	/* Attach plane to drm_atomic_state */
10102 	plane_state = drm_atomic_get_plane_state(state, plane);
10103 
10104 	ret = PTR_ERR_OR_ZERO(plane_state);
10105 	if (ret)
10106 		goto out;
10107 
10108 	/* Call commit internally with the state we just constructed */
10109 	ret = drm_atomic_commit(state);
10110 
10111 out:
10112 	drm_atomic_state_put(state);
10113 	if (ret)
10114 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10115 
10116 	return ret;
10117 }
10118 
10119 /*
10120  * This function handles all cases when set mode does not come upon hotplug.
10121  * This includes when a display is unplugged then plugged back into the
10122  * same port and when running without usermode desktop manager supprot
10123  */
10124 void dm_restore_drm_connector_state(struct drm_device *dev,
10125 				    struct drm_connector *connector)
10126 {
10127 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10128 	struct amdgpu_crtc *disconnected_acrtc;
10129 	struct dm_crtc_state *acrtc_state;
10130 
10131 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10132 		return;
10133 
10134 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10135 	if (!disconnected_acrtc)
10136 		return;
10137 
10138 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10139 	if (!acrtc_state->stream)
10140 		return;
10141 
10142 	/*
10143 	 * If the previous sink is not released and different from the current,
10144 	 * we deduce we are in a state where we can not rely on usermode call
10145 	 * to turn on the display, so we do it here
10146 	 */
10147 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10148 		dm_force_atomic_commit(&aconnector->base);
10149 }
10150 
10151 /*
10152  * Grabs all modesetting locks to serialize against any blocking commits,
10153  * Waits for completion of all non blocking commits.
10154  */
10155 static int do_aquire_global_lock(struct drm_device *dev,
10156 				 struct drm_atomic_state *state)
10157 {
10158 	struct drm_crtc *crtc;
10159 	struct drm_crtc_commit *commit;
10160 	long ret;
10161 
10162 	/*
10163 	 * Adding all modeset locks to aquire_ctx will
10164 	 * ensure that when the framework release it the
10165 	 * extra locks we are locking here will get released to
10166 	 */
10167 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10168 	if (ret)
10169 		return ret;
10170 
10171 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10172 		spin_lock(&crtc->commit_lock);
10173 		commit = list_first_entry_or_null(&crtc->commit_list,
10174 				struct drm_crtc_commit, commit_entry);
10175 		if (commit)
10176 			drm_crtc_commit_get(commit);
10177 		spin_unlock(&crtc->commit_lock);
10178 
10179 		if (!commit)
10180 			continue;
10181 
10182 		/*
10183 		 * Make sure all pending HW programming completed and
10184 		 * page flips done
10185 		 */
10186 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10187 
10188 		if (ret > 0)
10189 			ret = wait_for_completion_interruptible_timeout(
10190 					&commit->flip_done, 10*HZ);
10191 
10192 		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
10195 
10196 		drm_crtc_commit_put(commit);
10197 	}
10198 
10199 	return ret < 0 ? ret : 0;
10200 }
10201 
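/*
 * Derive the freesync (VRR) configuration for the CRTC from the connector
 * state: VRR is supported when the connector reports freesync capability and
 * the mode's refresh rate lies within the sink's [min_vfreq, max_vfreq] range.
 */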
10202 static void get_freesync_config_for_crtc(
10203 	struct dm_crtc_state *new_crtc_state,
10204 	struct dm_connector_state *new_con_state)
10205 {
10206 	struct mod_freesync_config config = {0};
10207 	struct amdgpu_dm_connector *aconnector =
10208 			to_amdgpu_dm_connector(new_con_state->base.connector);
10209 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10210 	int vrefresh = drm_mode_vrefresh(mode);
10211 	bool fs_vid_mode = false;
10212 
10213 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10214 					vrefresh >= aconnector->min_vfreq &&
10215 					vrefresh <= aconnector->max_vfreq;
10216 
10217 	if (new_crtc_state->vrr_supported) {
10218 		new_crtc_state->stream->ignore_msa_timing_param = true;
10219 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10220 
10221 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10222 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10223 		config.vsif_supported = true;
10224 		config.btr = true;
10225 
10226 		if (fs_vid_mode) {
10227 			config.state = VRR_STATE_ACTIVE_FIXED;
10228 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10229 			goto out;
10230 		} else if (new_crtc_state->base.vrr_enabled) {
10231 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10232 		} else {
10233 			config.state = VRR_STATE_INACTIVE;
10234 		}
10235 	}
10236 out:
10237 	new_crtc_state->freesync_config = config;
10238 }
10239 
10240 static void reset_freesync_config_for_crtc(
10241 	struct dm_crtc_state *new_crtc_state)
10242 {
10243 	new_crtc_state->vrr_supported = false;
10244 
10245 	memset(&new_crtc_state->vrr_infopacket, 0,
10246 	       sizeof(new_crtc_state->vrr_infopacket));
10247 }
10248 
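/*
 * Returns true when the old and new modes differ only in the vertical front
 * porch: vtotal, vsync_start and vsync_end change while the vsync pulse width
 * and all other timing parameters stay the same.
 */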
10249 static bool
10250 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10251 				 struct drm_crtc_state *new_crtc_state)
10252 {
10253 	const struct drm_display_mode *old_mode, *new_mode;
10254 
10255 	if (!old_crtc_state || !new_crtc_state)
10256 		return false;
10257 
10258 	old_mode = &old_crtc_state->mode;
10259 	new_mode = &new_crtc_state->mode;
10260 
10261 	if (old_mode->clock       == new_mode->clock &&
10262 	    old_mode->hdisplay    == new_mode->hdisplay &&
10263 	    old_mode->vdisplay    == new_mode->vdisplay &&
10264 	    old_mode->htotal      == new_mode->htotal &&
10265 	    old_mode->vtotal      != new_mode->vtotal &&
10266 	    old_mode->hsync_start == new_mode->hsync_start &&
10267 	    old_mode->vsync_start != new_mode->vsync_start &&
10268 	    old_mode->hsync_end   == new_mode->hsync_end &&
10269 	    old_mode->vsync_end   != new_mode->vsync_end &&
10270 	    old_mode->hskew       == new_mode->hskew &&
10271 	    old_mode->vscan       == new_mode->vscan &&
10272 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10273 	    (new_mode->vsync_end - new_mode->vsync_start))
10274 		return true;
10275 
10276 	return false;
10277 }
10278 
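/*
 * Pin the freesync configuration to a fixed refresh rate equal to the nominal
 * rate of the current mode: pixel clock divided by htotal * vtotal, expressed
 * in uHz.
 */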
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10280 	uint64_t num, den, res;
10281 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10282 
10283 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10284 
10285 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10286 	den = (unsigned long long)new_crtc_state->mode.htotal *
10287 	      (unsigned long long)new_crtc_state->mode.vtotal;
10288 
10289 	res = div_u64(num, den);
10290 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10291 }
10292 
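/*
 * Add or remove the dc_stream_state backing a CRTC in the DM atomic state.
 * With @enable false the old stream is removed from the DC context; with
 * @enable true a new stream is created, validated and added. Stream updates
 * that do not require a full modeset (scaling, ABM, color management,
 * freesync) are applied at the end.
 */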
10293 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10294 			 struct drm_atomic_state *state,
10295 			 struct drm_crtc *crtc,
10296 			 struct drm_crtc_state *old_crtc_state,
10297 			 struct drm_crtc_state *new_crtc_state,
10298 			 bool enable,
10299 			 bool *lock_and_validation_needed)
10300 {
10301 	struct dm_atomic_state *dm_state = NULL;
10302 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10303 	struct dc_stream_state *new_stream;
10304 	int ret = 0;
10305 
10306 	/*
10307 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10308 	 * update changed items
10309 	 */
10310 	struct amdgpu_crtc *acrtc = NULL;
10311 	struct amdgpu_dm_connector *aconnector = NULL;
10312 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10313 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10314 
10315 	new_stream = NULL;
10316 
10317 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10318 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10319 	acrtc = to_amdgpu_crtc(crtc);
10320 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10321 
10322 	/* TODO This hack should go away */
10323 	if (aconnector && enable) {
10324 		/* Make sure fake sink is created in plug-in scenario */
10325 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10326 							    &aconnector->base);
10327 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10328 							    &aconnector->base);
10329 
10330 		if (IS_ERR(drm_new_conn_state)) {
10331 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10332 			goto fail;
10333 		}
10334 
10335 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10336 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10337 
10338 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10339 			goto skip_modeset;
10340 
10341 		new_stream = create_validate_stream_for_sink(aconnector,
10342 							     &new_crtc_state->mode,
10343 							     dm_new_conn_state,
10344 							     dm_old_crtc_state->stream);
10345 
10346 		/*
10347 		 * we can have no stream on ACTION_SET if a display
10348 		 * was disconnected during S3, in this case it is not an
10349 		 * error, the OS will be updated after detection, and
10350 		 * will do the right thing on next atomic commit
10351 		 */
10352 
10353 		if (!new_stream) {
10354 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10355 					__func__, acrtc->base.base.id);
10356 			ret = -ENOMEM;
10357 			goto fail;
10358 		}
10359 
10360 		/*
10361 		 * TODO: Check VSDB bits to decide whether this should
10362 		 * be enabled or not.
10363 		 */
10364 		new_stream->triggered_crtc_reset.enabled =
10365 			dm->force_timing_sync;
10366 
10367 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10368 
10369 		ret = fill_hdr_info_packet(drm_new_conn_state,
10370 					   &new_stream->hdr_static_metadata);
10371 		if (ret)
10372 			goto fail;
10373 
10374 		/*
10375 		 * If we already removed the old stream from the context
10376 		 * (and set the new stream to NULL) then we can't reuse
10377 		 * the old stream even if the stream and scaling are unchanged.
10378 		 * We'll hit the BUG_ON and black screen.
10379 		 *
10380 		 * TODO: Refactor this function to allow this check to work
10381 		 * in all conditions.
10382 		 */
10383 		if (dm_new_crtc_state->stream &&
10384 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10385 			goto skip_modeset;
10386 
10387 		if (dm_new_crtc_state->stream &&
10388 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10389 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10390 			new_crtc_state->mode_changed = false;
10391 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10392 					 new_crtc_state->mode_changed);
10393 		}
10394 	}
10395 
10396 	/* mode_changed flag may get updated above, need to check again */
10397 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10398 		goto skip_modeset;
10399 
	drm_dbg_state(state->dev,
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
10404 		acrtc->crtc_id,
10405 		new_crtc_state->enable,
10406 		new_crtc_state->active,
10407 		new_crtc_state->planes_changed,
10408 		new_crtc_state->mode_changed,
10409 		new_crtc_state->active_changed,
10410 		new_crtc_state->connectors_changed);
10411 
10412 	/* Remove stream for any changed/disabled CRTC */
10413 	if (!enable) {
10414 
10415 		if (!dm_old_crtc_state->stream)
10416 			goto skip_modeset;
10417 
10418 		if (dm_new_crtc_state->stream &&
10419 		    is_timing_unchanged_for_freesync(new_crtc_state,
10420 						     old_crtc_state)) {
10421 			new_crtc_state->mode_changed = false;
10422 			DRM_DEBUG_DRIVER(
10423 				"Mode change not required for front porch change, "
10424 				"setting mode_changed to %d",
10425 				new_crtc_state->mode_changed);
10426 
10427 			set_freesync_fixed_config(dm_new_crtc_state);
10428 
10429 			goto skip_modeset;
10430 		} else if (aconnector &&
10431 			   is_freesync_video_mode(&new_crtc_state->mode,
10432 						  aconnector)) {
10433 			struct drm_display_mode *high_mode;
10434 
10435 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10436 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10437 				set_freesync_fixed_config(dm_new_crtc_state);
10438 			}
10439 		}
10440 
10441 		ret = dm_atomic_get_state(state, &dm_state);
10442 		if (ret)
10443 			goto fail;
10444 
10445 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10446 				crtc->base.id);
10447 
10448 		/* i.e. reset mode */
10449 		if (dc_remove_stream_from_ctx(
10450 				dm->dc,
10451 				dm_state->context,
10452 				dm_old_crtc_state->stream) != DC_OK) {
10453 			ret = -EINVAL;
10454 			goto fail;
10455 		}
10456 
10457 		dc_stream_release(dm_old_crtc_state->stream);
10458 		dm_new_crtc_state->stream = NULL;
10459 
10460 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10461 
10462 		*lock_and_validation_needed = true;
10463 
10464 	} else {/* Add stream for any updated/enabled CRTC */
10465 		/*
10466 		 * Quick fix to prevent NULL pointer on new_stream when
10467 		 * added MST connectors not found in existing crtc_state in the chained mode
10468 		 * TODO: need to dig out the root cause of that
10469 		 */
10470 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10471 			goto skip_modeset;
10472 
10473 		if (modereset_required(new_crtc_state))
10474 			goto skip_modeset;
10475 
10476 		if (modeset_required(new_crtc_state, new_stream,
10477 				     dm_old_crtc_state->stream)) {
10478 
10479 			WARN_ON(dm_new_crtc_state->stream);
10480 
10481 			ret = dm_atomic_get_state(state, &dm_state);
10482 			if (ret)
10483 				goto fail;
10484 
10485 			dm_new_crtc_state->stream = new_stream;
10486 
10487 			dc_stream_retain(new_stream);
10488 
10489 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10490 					 crtc->base.id);
10491 
10492 			if (dc_add_stream_to_ctx(
10493 					dm->dc,
10494 					dm_state->context,
10495 					dm_new_crtc_state->stream) != DC_OK) {
10496 				ret = -EINVAL;
10497 				goto fail;
10498 			}
10499 
10500 			*lock_and_validation_needed = true;
10501 		}
10502 	}
10503 
10504 skip_modeset:
10505 	/* Release extra reference */
10506 	if (new_stream)
		dc_stream_release(new_stream);
10508 
10509 	/*
10510 	 * We want to do dc stream updates that do not require a
10511 	 * full modeset below.
10512 	 */
10513 	if (!(enable && aconnector && new_crtc_state->active))
10514 		return 0;
10515 	/*
10516 	 * Given above conditions, the dc state cannot be NULL because:
10517 	 * 1. We're in the process of enabling CRTCs (just been added
10518 	 *    to the dc context, or already is on the context)
10519 	 * 2. Has a valid connector attached, and
10520 	 * 3. Is currently active and enabled.
10521 	 * => The dc stream state currently exists.
10522 	 */
10523 	BUG_ON(dm_new_crtc_state->stream == NULL);
10524 
10525 	/* Scaling or underscan settings */
10526 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10527 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10528 		update_stream_scaling_settings(
10529 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10530 
10531 	/* ABM settings */
10532 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10533 
10534 	/*
10535 	 * Color management settings. We also update color properties
10536 	 * when a modeset is needed, to ensure it gets reprogrammed.
10537 	 */
10538 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10539 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10540 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10541 		if (ret)
10542 			goto fail;
10543 	}
10544 
10545 	/* Update Freesync settings. */
10546 	get_freesync_config_for_crtc(dm_new_crtc_state,
10547 				     dm_new_conn_state);
10548 
10549 	return ret;
10550 
10551 fail:
10552 	if (new_stream)
10553 		dc_stream_release(new_stream);
10554 	return ret;
10555 }
10556 
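/*
 * Decide whether DC must remove and recreate the plane instead of updating it
 * in place. Any change on the same CRTC that can affect z-order, pipe
 * acquisition or bandwidth (scaling, rotation, blending, pixel format,
 * tiling/DCC) forces a reset.
 */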
10557 static bool should_reset_plane(struct drm_atomic_state *state,
10558 			       struct drm_plane *plane,
10559 			       struct drm_plane_state *old_plane_state,
10560 			       struct drm_plane_state *new_plane_state)
10561 {
10562 	struct drm_plane *other;
10563 	struct drm_plane_state *old_other_state, *new_other_state;
10564 	struct drm_crtc_state *new_crtc_state;
10565 	int i;
10566 
10567 	/*
10568 	 * TODO: Remove this hack once the checks below are sufficient
10569 	 * enough to determine when we need to reset all the planes on
10570 	 * the stream.
10571 	 */
10572 	if (state->allow_modeset)
10573 		return true;
10574 
10575 	/* Exit early if we know that we're adding or removing the plane. */
10576 	if (old_plane_state->crtc != new_plane_state->crtc)
10577 		return true;
10578 
10579 	/* old crtc == new_crtc == NULL, plane not in context. */
10580 	if (!new_plane_state->crtc)
10581 		return false;
10582 
10583 	new_crtc_state =
10584 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10585 
10586 	if (!new_crtc_state)
10587 		return true;
10588 
10589 	/* CRTC Degamma changes currently require us to recreate planes. */
10590 	if (new_crtc_state->color_mgmt_changed)
10591 		return true;
10592 
10593 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10594 		return true;
10595 
10596 	/*
10597 	 * If there are any new primary or overlay planes being added or
10598 	 * removed then the z-order can potentially change. To ensure
10599 	 * correct z-order and pipe acquisition the current DC architecture
10600 	 * requires us to remove and recreate all existing planes.
10601 	 *
10602 	 * TODO: Come up with a more elegant solution for this.
10603 	 */
10604 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10607 			continue;
10608 
10609 		if (old_other_state->crtc != new_plane_state->crtc &&
10610 		    new_other_state->crtc != new_plane_state->crtc)
10611 			continue;
10612 
10613 		if (old_other_state->crtc != new_other_state->crtc)
10614 			return true;
10615 
10616 		/* Src/dst size and scaling updates. */
10617 		if (old_other_state->src_w != new_other_state->src_w ||
10618 		    old_other_state->src_h != new_other_state->src_h ||
10619 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10620 		    old_other_state->crtc_h != new_other_state->crtc_h)
10621 			return true;
10622 
10623 		/* Rotation / mirroring updates. */
10624 		if (old_other_state->rotation != new_other_state->rotation)
10625 			return true;
10626 
10627 		/* Blending updates. */
10628 		if (old_other_state->pixel_blend_mode !=
10629 		    new_other_state->pixel_blend_mode)
10630 			return true;
10631 
10632 		/* Alpha updates. */
10633 		if (old_other_state->alpha != new_other_state->alpha)
10634 			return true;
10635 
10636 		/* Colorspace changes. */
10637 		if (old_other_state->color_range != new_other_state->color_range ||
10638 		    old_other_state->color_encoding != new_other_state->color_encoding)
10639 			return true;
10640 
10641 		/* Framebuffer checks fall at the end. */
10642 		if (!old_other_state->fb || !new_other_state->fb)
10643 			continue;
10644 
10645 		/* Pixel format changes can require bandwidth updates. */
10646 		if (old_other_state->fb->format != new_other_state->fb->format)
10647 			return true;
10648 
10649 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10650 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10651 
10652 		/* Tiling and DCC changes also require bandwidth updates. */
10653 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10654 		    old_afb->base.modifier != new_afb->base.modifier)
10655 			return true;
10656 	}
10657 
10658 	return false;
10659 }
10660 
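/*
 * Validate a framebuffer attached to the cursor plane: it must fit within the
 * hardware cursor limits, be uncropped, use a pitch of 64, 128 or 256 pixels
 * that matches its width, and be linear when no format modifier is supplied.
 */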
10661 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10662 			      struct drm_plane_state *new_plane_state,
10663 			      struct drm_framebuffer *fb)
10664 {
10665 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10666 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10667 	unsigned int pitch;
10668 	bool linear;
10669 
10670 	if (fb->width > new_acrtc->max_cursor_width ||
10671 	    fb->height > new_acrtc->max_cursor_height) {
10672 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10673 				 new_plane_state->fb->width,
10674 				 new_plane_state->fb->height);
10675 		return -EINVAL;
10676 	}
10677 	if (new_plane_state->src_w != fb->width << 16 ||
10678 	    new_plane_state->src_h != fb->height << 16) {
10679 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10680 		return -EINVAL;
10681 	}
10682 
10683 	/* Pitch in pixels */
10684 	pitch = fb->pitches[0] / fb->format->cpp[0];
10685 
10686 	if (fb->width != pitch) {
10687 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10688 				 fb->width, pitch);
10689 		return -EINVAL;
10690 	}
10691 
10692 	switch (pitch) {
10693 	case 64:
10694 	case 128:
10695 	case 256:
10696 		/* FB pitch is supported by cursor plane */
10697 		break;
10698 	default:
10699 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10700 		return -EINVAL;
10701 	}
10702 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10705 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10706 		if (adev->family < AMDGPU_FAMILY_AI) {
10707 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10708 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10709 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10710 		} else {
10711 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10712 		}
10713 		if (!linear) {
10714 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10715 			return -EINVAL;
10716 		}
10717 	}
10718 
10719 	return 0;
10720 }
10721 
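/*
 * Plane counterpart of dm_update_crtc_state(): remove the dc_plane_state from
 * the DC context for disabled or changed planes, or create, fill and add a
 * new dc_plane_state for enabled ones. Cursor planes are only sanity-checked
 * here since they have no dc_plane_state of their own.
 */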
10722 static int dm_update_plane_state(struct dc *dc,
10723 				 struct drm_atomic_state *state,
10724 				 struct drm_plane *plane,
10725 				 struct drm_plane_state *old_plane_state,
10726 				 struct drm_plane_state *new_plane_state,
10727 				 bool enable,
10728 				 bool *lock_and_validation_needed)
10729 {
10730 
10731 	struct dm_atomic_state *dm_state = NULL;
10732 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10733 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10734 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10735 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10736 	struct amdgpu_crtc *new_acrtc;
10737 	bool needs_reset;
10738 	int ret = 0;
10739 
10740 
10741 	new_plane_crtc = new_plane_state->crtc;
10742 	old_plane_crtc = old_plane_state->crtc;
10743 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10744 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10745 
10746 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10747 		if (!enable || !new_plane_crtc ||
10748 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10749 			return 0;
10750 
10751 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10752 
10753 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10754 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10755 			return -EINVAL;
10756 		}
10757 
10758 		if (new_plane_state->fb) {
10759 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10760 						 new_plane_state->fb);
10761 			if (ret)
10762 				return ret;
10763 		}
10764 
10765 		return 0;
10766 	}
10767 
10768 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10769 					 new_plane_state);
10770 
10771 	/* Remove any changed/removed planes */
10772 	if (!enable) {
10773 		if (!needs_reset)
10774 			return 0;
10775 
10776 		if (!old_plane_crtc)
10777 			return 0;
10778 
10779 		old_crtc_state = drm_atomic_get_old_crtc_state(
10780 				state, old_plane_crtc);
10781 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10782 
10783 		if (!dm_old_crtc_state->stream)
10784 			return 0;
10785 
10786 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10787 				plane->base.id, old_plane_crtc->base.id);
10788 
10789 		ret = dm_atomic_get_state(state, &dm_state);
10790 		if (ret)
10791 			return ret;
10792 
10793 		if (!dc_remove_plane_from_context(
10794 				dc,
10795 				dm_old_crtc_state->stream,
10796 				dm_old_plane_state->dc_state,
10797 				dm_state->context)) {
10798 
10799 			return -EINVAL;
10800 		}
10801 
10802 
10803 		dc_plane_state_release(dm_old_plane_state->dc_state);
10804 		dm_new_plane_state->dc_state = NULL;
10805 
10806 		*lock_and_validation_needed = true;
10807 
10808 	} else { /* Add new planes */
10809 		struct dc_plane_state *dc_new_plane_state;
10810 
10811 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10812 			return 0;
10813 
10814 		if (!new_plane_crtc)
10815 			return 0;
10816 
10817 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10818 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10819 
10820 		if (!dm_new_crtc_state->stream)
10821 			return 0;
10822 
10823 		if (!needs_reset)
10824 			return 0;
10825 
10826 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10827 		if (ret)
10828 			return ret;
10829 
10830 		WARN_ON(dm_new_plane_state->dc_state);
10831 
10832 		dc_new_plane_state = dc_create_plane_state(dc);
10833 		if (!dc_new_plane_state)
10834 			return -ENOMEM;
10835 
10836 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10837 				 plane->base.id, new_plane_crtc->base.id);
10838 
10839 		ret = fill_dc_plane_attributes(
10840 			drm_to_adev(new_plane_crtc->dev),
10841 			dc_new_plane_state,
10842 			new_plane_state,
10843 			new_crtc_state);
10844 		if (ret) {
10845 			dc_plane_state_release(dc_new_plane_state);
10846 			return ret;
10847 		}
10848 
10849 		ret = dm_atomic_get_state(state, &dm_state);
10850 		if (ret) {
10851 			dc_plane_state_release(dc_new_plane_state);
10852 			return ret;
10853 		}
10854 
10855 		/*
10856 		 * Any atomic check errors that occur after this will
10857 		 * not need a release. The plane state will be attached
10858 		 * to the stream, and therefore part of the atomic
10859 		 * state. It'll be released when the atomic state is
10860 		 * cleaned.
10861 		 */
10862 		if (!dc_add_plane_to_context(
10863 				dc,
10864 				dm_new_crtc_state->stream,
10865 				dc_new_plane_state,
10866 				dm_state->context)) {
10867 
10868 			dc_plane_state_release(dc_new_plane_state);
10869 			return -EINVAL;
10870 		}
10871 
10872 		dm_new_plane_state->dc_state = dc_new_plane_state;
10873 
10874 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10875 
		/*
		 * Tell DC to do a full surface update every time there is a
		 * plane change. Inefficient, but works for now.
		 */
10879 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10880 
10881 		*lock_and_validation_needed = true;
10882 	}
10883 
10884 
10885 	return ret;
10886 }
10887 
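/*
 * Return the plane source size in pixels, swapping width and height when the
 * plane is rotated by 90 or 270 degrees.
 */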
10888 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10889 				       int *src_w, int *src_h)
10890 {
10891 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10892 	case DRM_MODE_ROTATE_90:
10893 	case DRM_MODE_ROTATE_270:
10894 		*src_w = plane_state->src_h >> 16;
10895 		*src_h = plane_state->src_w >> 16;
10896 		break;
10897 	case DRM_MODE_ROTATE_0:
10898 	case DRM_MODE_ROTATE_180:
10899 	default:
10900 		*src_w = plane_state->src_w >> 16;
10901 		*src_h = plane_state->src_h >> 16;
10902 		break;
10903 	}
10904 }
10905 
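/*
 * Reject atomic states in which the cursor would be scaled differently from
 * the non-cursor planes beneath it on the same CRTC.
 */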
10906 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10907 				struct drm_crtc *crtc,
10908 				struct drm_crtc_state *new_crtc_state)
10909 {
10910 	struct drm_plane *cursor = crtc->cursor, *underlying;
10911 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10912 	int i;
10913 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10914 	int cursor_src_w, cursor_src_h;
10915 	int underlying_src_w, underlying_src_h;
10916 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * scaling matches the underlying planes'.
	 */
10921 
10922 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;
10926 
10927 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10928 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10929 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10930 
10931 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10932 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10933 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10934 			continue;
10935 
10936 		/* Ignore disabled planes */
10937 		if (!new_underlying_state->fb)
10938 			continue;
10939 
10940 		dm_get_oriented_plane_size(new_underlying_state,
10941 					   &underlying_src_w, &underlying_src_h);
10942 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10943 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10944 
10945 		if (cursor_scale_w != underlying_scale_w ||
10946 		    cursor_scale_h != underlying_scale_h) {
10947 			drm_dbg_atomic(crtc->dev,
10948 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10949 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10950 			return -EINVAL;
10951 		}
10952 
10953 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10954 		if (new_underlying_state->crtc_x <= 0 &&
10955 		    new_underlying_state->crtc_y <= 0 &&
10956 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10957 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10958 			break;
10959 	}
10960 
10961 	return 0;
10962 }
10963 
10964 #if defined(CONFIG_DRM_AMD_DC_DCN)
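/*
 * If @crtc is driven through an MST connector, add every other CRTC sharing
 * that connector's MST topology to the atomic state so DSC bandwidth can be
 * recomputed for the whole topology.
 */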
10965 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10966 {
10967 	struct drm_connector *connector;
10968 	struct drm_connector_state *conn_state, *old_conn_state;
10969 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10972 		if (!conn_state->crtc)
10973 			conn_state = old_conn_state;
10974 
10975 		if (conn_state->crtc != crtc)
10976 			continue;
10977 
10978 		aconnector = to_amdgpu_dm_connector(connector);
10979 		if (!aconnector->port || !aconnector->mst_port)
10980 			aconnector = NULL;
10981 		else
10982 			break;
10983 	}
10984 
10985 	if (!aconnector)
10986 		return 0;
10987 
10988 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10989 }
10990 #endif
10991 
10992 /**
10993  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10994  * @dev: The DRM device
10995  * @state: The atomic state to commit
10996  *
10997  * Validate that the given atomic state is programmable by DC into hardware.
10998  * This involves constructing a &struct dc_state reflecting the new hardware
10999  * state we wish to commit, then querying DC to see if it is programmable. It's
11000  * important not to modify the existing DC state. Otherwise, atomic_check
11001  * may unexpectedly commit hardware changes.
11002  *
11003  * When validating the DC state, it's important that the right locks are
11004  * acquired. For full updates case which removes/adds/updates streams on one
11005  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
11006  * that any such full update commit will wait for completion of any outstanding
11007  * flip using DRMs synchronization events.
11008  *
11009  * Note that DM adds the affected connectors for all CRTCs in state, when that
11010  * might not seem necessary. This is because DC stream creation requires the
11011  * DC sink, which is tied to the DRM connector state. Cleaning this up should
11012  * be possible but non-trivial - a possible TODO item.
11013  *
 * Return: 0 on success, or a negative error code if validation failed.
11015  */
11016 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11017 				  struct drm_atomic_state *state)
11018 {
11019 	struct amdgpu_device *adev = drm_to_adev(dev);
11020 	struct dm_atomic_state *dm_state = NULL;
11021 	struct dc *dc = adev->dm.dc;
11022 	struct drm_connector *connector;
11023 	struct drm_connector_state *old_con_state, *new_con_state;
11024 	struct drm_crtc *crtc;
11025 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11026 	struct drm_plane *plane;
11027 	struct drm_plane_state *old_plane_state, *new_plane_state;
11028 	enum dc_status status;
11029 	int ret, i;
11030 	bool lock_and_validation_needed = false;
11031 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11032 #if defined(CONFIG_DRM_AMD_DC_DCN)
11033 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
11034 	struct drm_dp_mst_topology_state *mst_state;
11035 	struct drm_dp_mst_topology_mgr *mgr;
11036 #endif
11037 
11038 	trace_amdgpu_dm_atomic_check_begin(state);
11039 
11040 	ret = drm_atomic_helper_check_modeset(dev, state);
11041 	if (ret) {
11042 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11043 		goto fail;
11044 	}
11045 
11046 	/* Check connector changes */
11047 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11048 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11049 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11050 
11051 		/* Skip connectors that are disabled or part of modeset already. */
11052 		if (!old_con_state->crtc && !new_con_state->crtc)
11053 			continue;
11054 
11055 		if (!new_con_state->crtc)
11056 			continue;
11057 
11058 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11059 		if (IS_ERR(new_crtc_state)) {
11060 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11061 			ret = PTR_ERR(new_crtc_state);
11062 			goto fail;
11063 		}
11064 
11065 		if (dm_old_con_state->abm_level !=
11066 		    dm_new_con_state->abm_level)
11067 			new_crtc_state->connectors_changed = true;
11068 	}
11069 
11070 #if defined(CONFIG_DRM_AMD_DC_DCN)
11071 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11072 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11073 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11074 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11075 				if (ret) {
11076 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11077 					goto fail;
11078 				}
11079 			}
11080 		}
11081 		pre_validate_dsc(state, &dm_state, vars);
11082 	}
11083 #endif
11084 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11085 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11086 
11087 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11088 		    !new_crtc_state->color_mgmt_changed &&
11089 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11090 			dm_old_crtc_state->dsc_force_changed == false)
11091 			continue;
11092 
11093 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11094 		if (ret) {
11095 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11096 			goto fail;
11097 		}
11098 
11099 		if (!new_crtc_state->enable)
11100 			continue;
11101 
11102 		ret = drm_atomic_add_affected_connectors(state, crtc);
11103 		if (ret) {
11104 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11105 			goto fail;
11106 		}
11107 
11108 		ret = drm_atomic_add_affected_planes(state, crtc);
11109 		if (ret) {
11110 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11111 			goto fail;
11112 		}
11113 
11114 		if (dm_old_crtc_state->dsc_force_changed)
11115 			new_crtc_state->mode_changed = true;
11116 	}
11117 
11118 	/*
11119 	 * Add all primary and overlay planes on the CRTC to the state
11120 	 * whenever a plane is enabled to maintain correct z-ordering
11121 	 * and to enable fast surface updates.
11122 	 */
11123 	drm_for_each_crtc(crtc, dev) {
11124 		bool modified = false;
11125 
11126 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11127 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11128 				continue;
11129 
11130 			if (new_plane_state->crtc == crtc ||
11131 			    old_plane_state->crtc == crtc) {
11132 				modified = true;
11133 				break;
11134 			}
11135 		}
11136 
11137 		if (!modified)
11138 			continue;
11139 
11140 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11141 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11142 				continue;
11143 
11144 			new_plane_state =
11145 				drm_atomic_get_plane_state(state, plane);
11146 
11147 			if (IS_ERR(new_plane_state)) {
11148 				ret = PTR_ERR(new_plane_state);
11149 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11150 				goto fail;
11151 			}
11152 		}
11153 	}
11154 
	/* Remove existing planes if they are modified */
11156 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11157 		ret = dm_update_plane_state(dc, state, plane,
11158 					    old_plane_state,
11159 					    new_plane_state,
11160 					    false,
11161 					    &lock_and_validation_needed);
11162 		if (ret) {
11163 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11164 			goto fail;
11165 		}
11166 	}
11167 
11168 	/* Disable all crtcs which require disable */
11169 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11170 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11171 					   old_crtc_state,
11172 					   new_crtc_state,
11173 					   false,
11174 					   &lock_and_validation_needed);
11175 		if (ret) {
11176 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11177 			goto fail;
11178 		}
11179 	}
11180 
11181 	/* Enable all crtcs which require enable */
11182 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11183 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11184 					   old_crtc_state,
11185 					   new_crtc_state,
11186 					   true,
11187 					   &lock_and_validation_needed);
11188 		if (ret) {
11189 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11190 			goto fail;
11191 		}
11192 	}
11193 
11194 	/* Add new/modified planes */
11195 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11196 		ret = dm_update_plane_state(dc, state, plane,
11197 					    old_plane_state,
11198 					    new_plane_state,
11199 					    true,
11200 					    &lock_and_validation_needed);
11201 		if (ret) {
11202 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11203 			goto fail;
11204 		}
11205 	}
11206 
11207 	/* Run this here since we want to validate the streams we created */
11208 	ret = drm_atomic_helper_check_planes(dev, state);
11209 	if (ret) {
11210 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11211 		goto fail;
11212 	}
11213 
11214 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11215 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11216 		if (dm_new_crtc_state->mpo_requested)
11217 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11218 	}
11219 
11220 	/* Check cursor planes scaling */
11221 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11222 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11223 		if (ret) {
11224 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11225 			goto fail;
11226 		}
11227 	}
11228 
11229 	if (state->legacy_cursor_update) {
11230 		/*
11231 		 * This is a fast cursor update coming from the plane update
11232 		 * helper, check if it can be done asynchronously for better
11233 		 * performance.
11234 		 */
11235 		state->async_update =
11236 			!drm_atomic_helper_async_check(dev, state);
11237 
11238 		/*
11239 		 * Skip the remaining global validation if this is an async
11240 		 * update. Cursor updates can be done without affecting
11241 		 * state or bandwidth calcs and this avoids the performance
11242 		 * penalty of locking the private state object and
11243 		 * allocating a new dc_state.
11244 		 */
11245 		if (state->async_update)
11246 			return 0;
11247 	}
11248 
11249 	/* Check scaling and underscan changes*/
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context without causing a full reset. Need to
	 * decide how to handle this.
	 */
11254 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11255 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11256 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11257 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11258 
11259 		/* Skip any modesets/resets */
11260 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11261 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11262 			continue;
11263 
		/* Skip anything that is not a scaling or underscan change */
11265 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11266 			continue;
11267 
11268 		lock_and_validation_needed = true;
11269 	}
11270 
11271 #if defined(CONFIG_DRM_AMD_DC_DCN)
11272 	/* set the slot info for each mst_state based on the link encoding format */
11273 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11274 		struct amdgpu_dm_connector *aconnector;
11275 		struct drm_connector *connector;
11276 		struct drm_connector_list_iter iter;
11277 		u8 link_coding_cap;
11278 
		if (!mgr->mst_state)
11280 			continue;
11281 
11282 		drm_connector_list_iter_begin(dev, &iter);
11283 		drm_for_each_connector_iter(connector, &iter) {
11284 			int id = connector->index;
11285 
11286 			if (id == mst_state->mgr->conn_base_id) {
11287 				aconnector = to_amdgpu_dm_connector(connector);
11288 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11289 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11290 
11291 				break;
11292 			}
11293 		}
		drm_connector_list_iter_end(&iter);
	}
11297 #endif
11298 	/**
11299 	 * Streams and planes are reset when there are changes that affect
11300 	 * bandwidth. Anything that affects bandwidth needs to go through
11301 	 * DC global validation to ensure that the configuration can be applied
11302 	 * to hardware.
11303 	 *
11304 	 * We have to currently stall out here in atomic_check for outstanding
11305 	 * commits to finish in this case because our IRQ handlers reference
11306 	 * DRM state directly - we can end up disabling interrupts too early
11307 	 * if we don't.
11308 	 *
11309 	 * TODO: Remove this stall and drop DM state private objects.
11310 	 */
11311 	if (lock_and_validation_needed) {
11312 		ret = dm_atomic_get_state(state, &dm_state);
11313 		if (ret) {
11314 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11315 			goto fail;
11316 		}
11317 
11318 		ret = do_aquire_global_lock(dev, state);
11319 		if (ret) {
11320 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11321 			goto fail;
11322 		}
11323 
11324 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}
11329 
11330 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11331 		if (ret) {
11332 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11333 			goto fail;
11334 		}
11335 #endif
11336 
11337 		/*
11338 		 * Perform validation of MST topology in the state:
11339 		 * We need to perform MST atomic check before calling
11340 		 * dc_validate_global_state(), or there is a chance
11341 		 * to get stuck in an infinite loop and hang eventually.
11342 		 */
11343 		ret = drm_dp_mst_atomic_check(state);
11344 		if (ret) {
11345 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11346 			goto fail;
11347 		}
11348 		status = dc_validate_global_state(dc, dm_state->context, true);
11349 		if (status != DC_OK) {
			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
					 dc_status_to_str(status), status);
11352 			ret = -EINVAL;
11353 			goto fail;
11354 		}
11355 	} else {
11356 		/*
11357 		 * The commit is a fast update. Fast updates shouldn't change
11358 		 * the DC context, affect global validation, and can have their
11359 		 * commit work done in parallel with other commits not touching
11360 		 * the same resource. If we have a new DC context as part of
11361 		 * the DM atomic state from validation we need to free it and
11362 		 * retain the existing one instead.
11363 		 *
11364 		 * Furthermore, since the DM atomic state only contains the DC
11365 		 * context and can safely be annulled, we can free the state
11366 		 * and clear the associated private object now to free
11367 		 * some memory and avoid a possible use-after-free later.
11368 		 */
11369 
11370 		for (i = 0; i < state->num_private_objs; i++) {
11371 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11372 
11373 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11374 				int j = state->num_private_objs-1;
11375 
11376 				dm_atomic_destroy_state(obj,
11377 						state->private_objs[i].state);
11378 
11379 				/* If i is not at the end of the array then the
11380 				 * last element needs to be moved to where i was
11381 				 * before the array can safely be truncated.
11382 				 */
11383 				if (i != j)
11384 					state->private_objs[i] =
11385 						state->private_objs[j];
11386 
11387 				state->private_objs[j].ptr = NULL;
11388 				state->private_objs[j].state = NULL;
11389 				state->private_objs[j].old_state = NULL;
11390 				state->private_objs[j].new_state = NULL;
11391 
11392 				state->num_private_objs = j;
11393 				break;
11394 			}
11395 		}
11396 	}
11397 
11398 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11400 		struct dm_crtc_state *dm_new_crtc_state =
11401 			to_dm_crtc_state(new_crtc_state);
11402 
11403 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11404 							 UPDATE_TYPE_FULL :
11405 							 UPDATE_TYPE_FAST;
11406 	}
11407 
11408 	/* Must be success */
11409 	WARN_ON(ret);
11410 
11411 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11412 
11413 	return ret;
11414 
11415 fail:
11416 	if (ret == -EDEADLK)
11417 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11418 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11419 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11420 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11422 
11423 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11424 
11425 	return ret;
11426 }
11427 
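/*
 * Report whether the DP sink sets DP_MSA_TIMING_PAR_IGNORED in the
 * DP_DOWN_STREAM_PORT_COUNT DPCD register, i.e. it can maintain its timing
 * without relying on the MSA parameters.
 */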
11428 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11429 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11430 {
11431 	uint8_t dpcd_data;
11432 	bool capable = false;
11433 
11434 	if (amdgpu_dm_connector->dc_link &&
11435 		dm_helpers_dp_read_dpcd(
11436 				NULL,
11437 				amdgpu_dm_connector->dc_link,
11438 				DP_DOWN_STREAM_PORT_COUNT,
11439 				&dpcd_data,
11440 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11442 	}
11443 
11444 	return capable;
11445 }
11446 
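/*
 * Send one chunk of a CEA extension block to DMUB for parsing and handle the
 * reply: either an ACK for the chunk, or the parsed AMD VSDB contents
 * (freesync support and min/max refresh rates) once the whole block has been
 * consumed.
 */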
11447 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11448 		unsigned int offset,
11449 		unsigned int total_length,
11450 		uint8_t *data,
11451 		unsigned int length,
11452 		struct amdgpu_hdmi_vsdb_info *vsdb)
11453 {
11454 	bool res;
11455 	union dmub_rb_cmd cmd;
11456 	struct dmub_cmd_send_edid_cea *input;
11457 	struct dmub_cmd_edid_cea_output *output;
11458 
11459 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11460 		return false;
11461 
11462 	memset(&cmd, 0, sizeof(cmd));
11463 
11464 	input = &cmd.edid_cea.data.input;
11465 
11466 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11467 	cmd.edid_cea.header.sub_type = 0;
11468 	cmd.edid_cea.header.payload_bytes =
11469 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11470 	input->offset = offset;
11471 	input->length = length;
11472 	input->cea_total_length = total_length;
11473 	memcpy(input->payload, data, length);
11474 
11475 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11476 	if (!res) {
11477 		DRM_ERROR("EDID CEA parser failed\n");
11478 		return false;
11479 	}
11480 
11481 	output = &cmd.edid_cea.data.output;
11482 
11483 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11484 		if (!output->ack.success) {
11485 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11486 					output->ack.offset);
11487 		}
11488 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11489 		if (!output->amd_vsdb.vsdb_found)
11490 			return false;
11491 
11492 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11493 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11494 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11495 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11496 	} else {
11497 		DRM_WARN("Unknown EDID CEA parser results\n");
11498 		return false;
11499 	}
11500 
11501 	return true;
11502 }
11503 
11504 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11505 		uint8_t *edid_ext, int len,
11506 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11507 {
11508 	int i;
11509 
11510 	/* send extension block to DMCU for parsing */
11511 	for (i = 0; i < len; i += 8) {
11512 		bool res;
11513 		int offset;
11514 
		/* send 8 bytes at a time */
11516 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11517 			return false;
11518 
		if (i + 8 == len) {
			/* EDID block sent completely, expect result */
11521 			int version, min_rate, max_rate;
11522 
11523 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11524 			if (res) {
11525 				/* amd vsdb found */
11526 				vsdb_info->freesync_supported = 1;
11527 				vsdb_info->amd_vsdb_version = version;
11528 				vsdb_info->min_refresh_rate_hz = min_rate;
11529 				vsdb_info->max_refresh_rate_hz = max_rate;
11530 				return true;
11531 			}
11532 			/* not amd vsdb */
11533 			return false;
11534 		}
11535 
		/* check for ack */
11537 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11538 		if (!res)
11539 			return false;
11540 	}
11541 
11542 	return false;
11543 }
11544 
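/*
 * DMUB counterpart of parse_edid_cea_dmcu(): stream the CEA extension
 * block to DMUB in 8-byte chunks; @vsdb_info is filled in as the replies
 * come back.
 */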
11545 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11546 		uint8_t *edid_ext, int len,
11547 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11548 {
11549 	int i;
11550 
	/* send extension block to DMUB for parsing */
11552 	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
11554 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11555 			return false;
11556 	}
11557 
11558 	return vsdb_info->freesync_supported;
11559 }
11560 
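/* Parse the CEA extension block with DMUB when available, DMCU otherwise. */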
11561 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11562 		uint8_t *edid_ext, int len,
11563 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11564 {
11565 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11566 
11567 	if (adev->dm.dmub_srv)
11568 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11569 	else
11570 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11571 }
11572 
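/*
 * Find the CEA extension block in the EDID and have the firmware parser
 * look for the AMD VSDB. Returns the index of the CEA extension block on
 * success, or -ENODEV when there is no EDID, no CEA extension, or no
 * usable VSDB.
 */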
11573 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11574 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11575 {
11576 	uint8_t *edid_ext = NULL;
11577 	int i;
11578 	bool valid_vsdb_found = false;
11579 
11580 	/*----- drm_find_cea_extension() -----*/
11581 	/* No EDID or EDID extensions */
11582 	if (edid == NULL || edid->extensions == 0)
11583 		return -ENODEV;
11584 
11585 	/* Find CEA extension */
11586 	for (i = 0; i < edid->extensions; i++) {
11587 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11588 		if (edid_ext[0] == CEA_EXT)
11589 			break;
11590 	}
11591 
11592 	if (i == edid->extensions)
11593 		return -ENODEV;
11594 
11595 	/*----- cea_db_offsets() -----*/
11596 	if (edid_ext[0] != CEA_EXT)
11597 		return -ENODEV;
11598 
11599 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11600 
11601 	return valid_vsdb_found ? i : -ENODEV;
11602 }
11603 
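/*
 * amdgpu_dm_update_freesync_caps - determine whether the sink is
 * FreeSync/VRR capable and update the connector state accordingly.
 *
 * For DP/eDP sinks that can ignore MSA timing parameters, the supported
 * refresh range comes from the EDID monitor range descriptor; for HDMI
 * sinks it comes from the AMD VSDB in the CEA extension block. The sink is
 * reported as FreeSync capable only when the usable range exceeds 10 Hz.
 */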
11604 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11605 					struct edid *edid)
11606 {
11607 	int i = 0;
11608 	struct detailed_timing *timing;
11609 	struct detailed_non_pixel *data;
11610 	struct detailed_data_monitor_range *range;
11611 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11612 			to_amdgpu_dm_connector(connector);
11613 	struct dm_connector_state *dm_con_state = NULL;
11614 	struct dc_sink *sink;
11615 
11616 	struct drm_device *dev = connector->dev;
11617 	struct amdgpu_device *adev = drm_to_adev(dev);
11618 	bool freesync_capable = false;
11619 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11620 
11621 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
11623 		goto update;
11624 	}
11625 
11626 	sink = amdgpu_dm_connector->dc_sink ?
11627 		amdgpu_dm_connector->dc_sink :
11628 		amdgpu_dm_connector->dc_em_sink;
11629 
11630 	if (!edid || !sink) {
11631 		dm_con_state = to_dm_connector_state(connector->state);
11632 
11633 		amdgpu_dm_connector->min_vfreq = 0;
11634 		amdgpu_dm_connector->max_vfreq = 0;
11635 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11636 		connector->display_info.monitor_range.min_vfreq = 0;
11637 		connector->display_info.monitor_range.max_vfreq = 0;
11638 		freesync_capable = false;
11639 
11640 		goto update;
11641 	}
11642 
11643 	dm_con_state = to_dm_connector_state(connector->state);
11644 
11645 	if (!adev->dm.freesync_module)
		goto update;

11649 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11650 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11651 		bool edid_check_required = false;
11652 
11653 		if (edid) {
11654 			edid_check_required = is_dp_capable_without_timing_msa(
11655 						adev->dm.dc,
11656 						amdgpu_dm_connector);
11657 		}
11658 
		if (edid_check_required && (edid->version > 1 ||
		   (edid->version == 1 && edid->revision > 1))) {
11661 			for (i = 0; i < 4; i++) {
11662 
11663 				timing	= &edid->detailed_timings[i];
11664 				data	= &timing->data.other_data;
11665 				range	= &data->data.range;
11666 				/*
11667 				 * Check if monitor has continuous frequency mode
11668 				 */
11669 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11670 					continue;
11671 				/*
11672 				 * Check for flag range limits only. If flag == 1 then
11673 				 * no additional timing information provided.
11674 				 * Default GTF, GTF Secondary curve and CVT are not
11675 				 * supported
11676 				 */
11677 				if (range->flags != 1)
11678 					continue;
11679 
11680 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11681 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11682 				amdgpu_dm_connector->pixel_clock_mhz =
11683 					range->pixel_clock_mhz * 10;
11684 
11685 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11686 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11687 
11688 				break;
11689 			}
11690 
11691 			if (amdgpu_dm_connector->max_vfreq -
11692 			    amdgpu_dm_connector->min_vfreq > 10) {
11693 
11694 				freesync_capable = true;
11695 			}
11696 		}
11697 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11698 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11699 		if (i >= 0 && vsdb_info.freesync_supported) {
11700 			timing  = &edid->detailed_timings[i];
11701 			data    = &timing->data.other_data;
11702 
11703 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11704 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11705 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11706 				freesync_capable = true;
11707 
11708 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11709 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11710 		}
11711 	}
11712 
11713 update:
11714 	if (dm_con_state)
11715 		dm_con_state->freesync_capable = freesync_capable;
11716 
11717 	if (connector->vrr_capable_property)
11718 		drm_connector_set_vrr_capable_property(connector,
11719 						       freesync_capable);
11720 }
11721 
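/*
 * Apply the current force_timing_sync setting to every active stream and
 * re-trigger CRTC synchronization, all under the DC lock.
 */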
11722 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11723 {
11724 	struct amdgpu_device *adev = drm_to_adev(dev);
11725 	struct dc *dc = adev->dm.dc;
11726 	int i;
11727 
11728 	mutex_lock(&adev->dm.dc_lock);
11729 	if (dc->current_state) {
11730 		for (i = 0; i < dc->current_state->stream_count; ++i)
11731 			dc->current_state->streams[i]
11732 				->triggered_crtc_reset.enabled =
11733 				adev->dm.force_timing_sync;
11734 
11735 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11736 		dc_trigger_sync(dc, dc->current_state);
11737 	}
11738 	mutex_unlock(&adev->dm.dc_lock);
11739 }
11740 
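/* Register write helper used by DC; every write is traced. */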
11741 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11742 		       uint32_t value, const char *func_name)
11743 {
11744 #ifdef DM_CHECK_ADDR_0
11745 	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
11747 		return;
11748 	}
11749 #endif
11750 	cgs_write_register(ctx->cgs_device, address, value);
11751 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11752 }
11753 
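/*
 * Register read helper used by DC. Reads are not expected while a DMUB
 * register-offload gather is in progress, so that case asserts and
 * returns 0; otherwise the read is performed and traced.
 */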
11754 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11755 			  const char *func_name)
11756 {
11757 	uint32_t value;
11758 #ifdef DM_CHECK_ADDR_0
11759 	if (address == 0) {
11760 		DC_ERR("invalid register read; address = 0\n");
11761 		return 0;
11762 	}
11763 #endif
11764 
11765 	if (ctx->dmub_srv &&
11766 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11767 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11768 		ASSERT(false);
11769 		return 0;
11770 	}
11771 
11772 	value = cgs_read_register(ctx->cgs_device, address);
11773 
11774 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11775 
11776 	return value;
11777 }
11778 
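/*
 * Translate the status of a DMUB async-to-sync transaction into the value
 * returned to the caller: the AUX reply length (for AUX) or 0 (for
 * SET_CONFIG) on success, -1 otherwise. The detailed result code is stored
 * in *operation_result.
 */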
11779 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11780 						struct dc_context *ctx,
11781 						uint8_t status_type,
11782 						uint32_t *operation_result)
11783 {
11784 	struct amdgpu_device *adev = ctx->driver_context;
11785 	int return_status = -1;
11786 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11787 
11788 	if (is_cmd_aux) {
11789 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11790 			return_status = p_notify->aux_reply.length;
11791 			*operation_result = p_notify->result;
11792 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11793 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11794 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11795 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11796 		} else {
11797 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11798 		}
11799 	} else {
11800 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11801 			return_status = 0;
11802 			*operation_result = p_notify->sc_status;
11803 		} else {
11804 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11805 		}
11806 	}
11807 
11808 	return return_status;
11809 }
11810 
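/*
 * Issue an AUX or SET_CONFIG request through DMUB and wait (up to 10
 * seconds) for the notification that completes it. For successful AUX
 * reads, the reply data is copied back into the payload before the outcome
 * is reported via amdgpu_dm_set_dmub_async_sync_status().
 */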
11811 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11812 	unsigned int link_index, void *cmd_payload, void *operation_result)
11813 {
11814 	struct amdgpu_device *adev = ctx->driver_context;
11815 	int ret = 0;
11816 
11817 	if (is_cmd_aux) {
11818 		dc_process_dmub_aux_transfer_async(ctx->dc,
11819 			link_index, (struct aux_payload *)cmd_payload);
11820 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11821 					(struct set_config_cmd_payload *)cmd_payload,
11822 					adev->dm.dmub_notify)) {
11823 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11824 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11825 					(uint32_t *)operation_result);
11826 	}
11827 
11828 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11829 	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
11831 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11832 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11833 				(uint32_t *)operation_result);
11834 	}
11835 
11836 	if (is_cmd_aux) {
11837 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11838 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11839 
11840 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11841 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11842 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11843 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11844 				       adev->dm.dmub_notify->aux_reply.length);
11845 			}
11846 		}
11847 	}
11848 
11849 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11850 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11851 			(uint32_t *)operation_result);
11852 }
11853 
11854 /*
11855  * Check whether seamless boot is supported.
11856  *
11857  * So far we only support seamless boot on CHIP_VANGOGH.
11858  * If everything goes well, we may consider expanding
11859  * seamless boot to other ASICs.
11860  */
11861 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11862 {
11863 	switch (adev->asic_type) {
11864 	case CHIP_VANGOGH:
11865 		if (!adev->mman.keep_stolen_vga_memory)
11866 			return true;
11867 		break;
11868 	default:
11869 		break;
11870 	}
11871 
11872 	return false;
11873 }
11874