1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_fb_helper.h>
82 #include <drm/drm_fourcc.h>
83 #include <drm/drm_edid.h>
84 #include <drm/drm_vblank.h>
85 #include <drm/drm_audio_component.h>
86 
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "soc15_common.h"
93 #include "vega10_ip_offset.h"
94 
95 #include "soc15_common.h"
96 
97 #include "gc/gc_11_0_0_offset.h"
98 #include "gc/gc_11_0_0_sh_mask.h"
99 
100 #include "modules/inc/mod_freesync.h"
101 #include "modules/power/power_helpers.h"
102 #include "modules/inc/mod_info_packet.h"
103 
104 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
106 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
108 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
110 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
112 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
114 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
116 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
117 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
118 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
119 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
120 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
121 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
122 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
123 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
124 
125 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
126 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
127 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
128 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
129 
130 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
131 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
132 
133 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
134 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
135 
136 /* Number of bytes in PSP header for firmware. */
137 #define PSP_HEADER_BYTES 0x100
138 
139 /* Number of bytes in PSP footer for firmware. */
140 #define PSP_FOOTER_BYTES 0x100
141 
142 /**
143  * DOC: overview
144  *
145  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
146  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
147  * requests into DC requests, and DC responses into DRM responses.
148  *
149  * The root control structure is &struct amdgpu_display_manager.
150  */
151 
152 /* basic init/fini API */
153 static int amdgpu_dm_init(struct amdgpu_device *adev);
154 static void amdgpu_dm_fini(struct amdgpu_device *adev);
155 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
156 
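/* Map the dongle type from the link's DPCD caps to a DRM subconnector type. */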
157 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
158 {
159 	switch (link->dpcd_caps.dongle_type) {
160 	case DISPLAY_DONGLE_NONE:
161 		return DRM_MODE_SUBCONNECTOR_Native;
162 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
163 		return DRM_MODE_SUBCONNECTOR_VGA;
164 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
165 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
166 		return DRM_MODE_SUBCONNECTOR_DVID;
167 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
168 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
169 		return DRM_MODE_SUBCONNECTOR_HDMIA;
170 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
171 	default:
172 		return DRM_MODE_SUBCONNECTOR_Unknown;
173 	}
174 }
175 
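/* Refresh the DRM dp_subconnector property for a DP connector based on the
 * dongle type of the currently attached sink (if any).
 */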
176 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
177 {
178 	struct dc_link *link = aconnector->dc_link;
179 	struct drm_connector *connector = &aconnector->base;
180 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
181 
182 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
183 		return;
184 
185 	if (aconnector->dc_sink)
186 		subconnector = get_subconnector_type(link);
187 
188 	drm_object_property_set_value(&connector->base,
189 			connector->dev->mode_config.dp_subconnector_property,
190 			subconnector);
191 }
192 
193 /*
194  * Initializes drm_device display related structures, based on the information
195  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
196  * drm_encoder and drm_mode_config
197  *
198  * Returns 0 on success
199  */
200 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
201 /* removes and deallocates the drm structures, created by the above function */
202 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
203 
204 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
205 				struct drm_plane *plane,
206 				unsigned long possible_crtcs,
207 				const struct dc_plane_cap *plane_cap);
208 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
209 			       struct drm_plane *plane,
210 			       uint32_t link_index);
211 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
212 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
213 				    uint32_t link_index,
214 				    struct amdgpu_encoder *amdgpu_encoder);
215 static int amdgpu_dm_encoder_init(struct drm_device *dev,
216 				  struct amdgpu_encoder *aencoder,
217 				  uint32_t link_index);
218 
219 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
220 
221 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
222 
223 static int amdgpu_dm_atomic_check(struct drm_device *dev,
224 				  struct drm_atomic_state *state);
225 
226 static void handle_cursor_update(struct drm_plane *plane,
227 				 struct drm_plane_state *old_plane_state);
228 
229 static const struct drm_format_info *
230 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
231 
232 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
233 static void handle_hpd_rx_irq(void *param);
234 
235 static bool
236 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
237 				 struct drm_crtc_state *new_crtc_state);
238 /*
239  * dm_vblank_get_counter
240  *
241  * @brief
242  * Get counter for number of vertical blanks
243  *
244  * @param
245  * struct amdgpu_device *adev - [in] desired amdgpu device
246  * int crtc - [in] which CRTC to get the counter from
247  *
248  * @return
249  * Counter for vertical blanks
250  */
251 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
252 {
253 	if (crtc >= adev->mode_info.num_crtc)
254 		return 0;
255 	else {
256 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
257 
258 		if (acrtc->dm_irq_params.stream == NULL) {
259 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
260 				  crtc);
261 			return 0;
262 		}
263 
264 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
265 	}
266 }
267 
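/* Report the current scanout position and vblank start/end for a CRTC,
 * packed into the register-style format expected by the base driver.
 */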
268 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
269 				  u32 *vbl, u32 *position)
270 {
271 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
272 
273 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
274 		return -EINVAL;
275 	else {
276 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
277 
278 		if (acrtc->dm_irq_params.stream ==  NULL) {
279 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
280 				  crtc);
281 			return 0;
282 		}
283 
284 		/*
285 		 * TODO rework base driver to use values directly.
286 		 * for now parse it back into reg-format
287 		 */
288 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
289 					 &v_blank_start,
290 					 &v_blank_end,
291 					 &h_position,
292 					 &v_position);
293 
294 		*position = v_position | (h_position << 16);
295 		*vbl = v_blank_start | (v_blank_end << 16);
296 	}
297 
298 	return 0;
299 }
300 
301 static bool dm_is_idle(void *handle)
302 {
303 	/* XXX todo */
304 	return true;
305 }
306 
307 static int dm_wait_for_idle(void *handle)
308 {
309 	/* XXX todo */
310 	return 0;
311 }
312 
313 static bool dm_check_soft_reset(void *handle)
314 {
315 	return false;
316 }
317 
318 static int dm_soft_reset(void *handle)
319 {
320 	/* XXX todo */
321 	return 0;
322 }
323 
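/* Find the amdgpu_crtc that is driven by the given OTG instance. */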
324 static struct amdgpu_crtc *
325 get_crtc_by_otg_inst(struct amdgpu_device *adev,
326 		     int otg_inst)
327 {
328 	struct drm_device *dev = adev_to_drm(adev);
329 	struct drm_crtc *crtc;
330 	struct amdgpu_crtc *amdgpu_crtc;
331 
332 	if (WARN_ON(otg_inst == -1))
333 		return adev->mode_info.crtcs[0];
334 
335 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
336 		amdgpu_crtc = to_amdgpu_crtc(crtc);
337 
338 		if (amdgpu_crtc->otg_inst == otg_inst)
339 			return amdgpu_crtc;
340 	}
341 
342 	return NULL;
343 }
344 
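/* VRR (freesync) state helpers: "active" covers both the variable and the
 * fixed refresh state while freesync is engaged.
 */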
345 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
346 {
347 	return acrtc->dm_irq_params.freesync_config.state ==
348 		       VRR_STATE_ACTIVE_VARIABLE ||
349 	       acrtc->dm_irq_params.freesync_config.state ==
350 		       VRR_STATE_ACTIVE_FIXED;
351 }
352 
353 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
354 {
355 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
356 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
357 }
358 
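/* DC timing adjustment (vmin/vmax) must be reprogrammed when entering
 * fixed-rate VRR or whenever the VRR active state changes.
 */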
359 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
360 					      struct dm_crtc_state *new_state)
361 {
362 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
363 		return true;
364 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
365 		return true;
366 	else
367 		return false;
368 }
369 
370 /**
371  * dm_pflip_high_irq() - Handle pageflip interrupt
372  * @interrupt_params: used for determining the CRTC instance
373  *
374  * Handles the pageflip interrupt by notifying all interested parties
375  * that the pageflip has been completed.
376  */
377 static void dm_pflip_high_irq(void *interrupt_params)
378 {
379 	struct amdgpu_crtc *amdgpu_crtc;
380 	struct common_irq_params *irq_params = interrupt_params;
381 	struct amdgpu_device *adev = irq_params->adev;
382 	unsigned long flags;
383 	struct drm_pending_vblank_event *e;
384 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
385 	bool vrr_active;
386 
387 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
388 
389 	/* IRQ could occur when in initial stage */
390 	/* TODO work and BO cleanup */
391 	if (amdgpu_crtc == NULL) {
392 		DC_LOG_PFLIP("CRTC is null, returning.\n");
393 		return;
394 	}
395 
396 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
397 
398 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
399 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
400 						 amdgpu_crtc->pflip_status,
401 						 AMDGPU_FLIP_SUBMITTED,
402 						 amdgpu_crtc->crtc_id,
403 						 amdgpu_crtc);
404 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
405 		return;
406 	}
407 
408 	/* page flip completed. */
409 	e = amdgpu_crtc->event;
410 	amdgpu_crtc->event = NULL;
411 
412 	WARN_ON(!e);
413 
414 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
415 
416 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
417 	if (!vrr_active ||
418 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
419 				      &v_blank_end, &hpos, &vpos) ||
420 	    (vpos < v_blank_start)) {
421 		/* Update to correct count and vblank timestamp if racing with
422 		 * vblank irq. This also updates to the correct vblank timestamp
423 		 * even in VRR mode, as scanout is past the front-porch atm.
424 		 */
425 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
426 
427 		/* Wake up userspace by sending the pageflip event with proper
428 		 * count and timestamp of vblank of flip completion.
429 		 */
430 		if (e) {
431 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
432 
433 			/* Event sent, so done with vblank for this flip */
434 			drm_crtc_vblank_put(&amdgpu_crtc->base);
435 		}
436 	} else if (e) {
437 		/* VRR active and inside front-porch: vblank count and
438 		 * timestamp for pageflip event will only be up to date after
439 		 * drm_crtc_handle_vblank() has been executed from late vblank
440 		 * irq handler after start of back-porch (vline 0). We queue the
441 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
442 		 * updated timestamp and count, once it runs after us.
443 		 *
444 		 * We need to open-code this instead of using the helper
445 		 * drm_crtc_arm_vblank_event(), as that helper would
446 		 * call drm_crtc_accurate_vblank_count(), which we must
447 		 * not call in VRR mode while we are in front-porch!
448 		 */
449 
450 		/* sequence will be replaced by real count during send-out. */
451 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
452 		e->pipe = amdgpu_crtc->crtc_id;
453 
454 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
455 		e = NULL;
456 	}
457 
458 	/* Keep track of vblank of this flip for flip throttling. We use the
459 	 * cooked hw counter, as that one incremented at start of this vblank
460 	 * of pageflip completion, so last_flip_vblank is the forbidden count
461 	 * for queueing new pageflips if vsync + VRR is enabled.
462 	 */
463 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
464 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
465 
466 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
467 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
468 
469 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
470 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
471 		     vrr_active, (int) !e);
472 }
473 
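/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, when VRR is active, performs core
 * vblank handling (plus BTR processing on pre-AI ASIC families) after the
 * end of the front-porch.
 */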
474 static void dm_vupdate_high_irq(void *interrupt_params)
475 {
476 	struct common_irq_params *irq_params = interrupt_params;
477 	struct amdgpu_device *adev = irq_params->adev;
478 	struct amdgpu_crtc *acrtc;
479 	struct drm_device *drm_dev;
480 	struct drm_vblank_crtc *vblank;
481 	ktime_t frame_duration_ns, previous_timestamp;
482 	unsigned long flags;
483 	int vrr_active;
484 
485 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
486 
487 	if (acrtc) {
488 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
489 		drm_dev = acrtc->base.dev;
490 		vblank = &drm_dev->vblank[acrtc->base.index];
491 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
492 		frame_duration_ns = vblank->time - previous_timestamp;
493 
494 		if (frame_duration_ns > 0) {
495 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
496 						frame_duration_ns,
497 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
498 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
499 		}
500 
501 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
502 			      acrtc->crtc_id,
503 			      vrr_active);
504 
505 		/* Core vblank handling is done here after end of front-porch in
506 		 * vrr mode, as vblank timestamping will give valid results
507 		 * now that we are past the front-porch. This will also deliver
508 		 * page-flip completion events that have been queued to us
509 		 * if a pageflip happened inside front-porch.
510 		 */
511 		if (vrr_active) {
512 			drm_crtc_handle_vblank(&acrtc->base);
513 
514 			/* BTR processing for pre-DCE12 ASICs */
515 			if (acrtc->dm_irq_params.stream &&
516 			    adev->family < AMDGPU_FAMILY_AI) {
517 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
518 				mod_freesync_handle_v_update(
519 				    adev->dm.freesync_module,
520 				    acrtc->dm_irq_params.stream,
521 				    &acrtc->dm_irq_params.vrr_params);
522 
523 				dc_stream_adjust_vmin_vmax(
524 				    adev->dm.dc,
525 				    acrtc->dm_irq_params.stream,
526 				    &acrtc->dm_irq_params.vrr_params.adjust);
527 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
528 			}
529 		}
530 	}
531 }
532 
533 /**
534  * dm_crtc_high_irq() - Handles CRTC interrupt
535  * @interrupt_params: used for determining the CRTC instance
536  *
537  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
538  * event handler.
539  */
540 static void dm_crtc_high_irq(void *interrupt_params)
541 {
542 	struct common_irq_params *irq_params = interrupt_params;
543 	struct amdgpu_device *adev = irq_params->adev;
544 	struct amdgpu_crtc *acrtc;
545 	unsigned long flags;
546 	int vrr_active;
547 
548 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
549 	if (!acrtc)
550 		return;
551 
552 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
553 
554 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
555 		      vrr_active, acrtc->dm_irq_params.active_planes);
556 
557 	/**
558 	 * Core vblank handling at start of front-porch is only possible
559 	 * in non-vrr mode, as only then does vblank timestamping give
560 	 * valid results while still in the front-porch. Otherwise defer it
561 	 * to dm_vupdate_high_irq after end of front-porch.
562 	 */
563 	if (!vrr_active)
564 		drm_crtc_handle_vblank(&acrtc->base);
565 
566 	/**
567 	 * Following stuff must happen at start of vblank, for crc
568 	 * computation and below-the-range btr support in vrr mode.
569 	 */
570 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
571 
572 	/* BTR updates need to happen before VUPDATE on Vega and above. */
573 	if (adev->family < AMDGPU_FAMILY_AI)
574 		return;
575 
576 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
577 
578 	if (acrtc->dm_irq_params.stream &&
579 	    acrtc->dm_irq_params.vrr_params.supported &&
580 	    acrtc->dm_irq_params.freesync_config.state ==
581 		    VRR_STATE_ACTIVE_VARIABLE) {
582 		mod_freesync_handle_v_update(adev->dm.freesync_module,
583 					     acrtc->dm_irq_params.stream,
584 					     &acrtc->dm_irq_params.vrr_params);
585 
586 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
587 					   &acrtc->dm_irq_params.vrr_params.adjust);
588 	}
589 
590 	/*
591 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
592 	 * In that case, pageflip completion interrupts won't fire and pageflip
593 	 * completion events won't get delivered. Prevent this by sending
594 	 * pending pageflip events from here if a flip is still pending.
595 	 *
596 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
597 	 * avoid race conditions between flip programming and completion,
598 	 * which could cause too early flip completion events.
599 	 */
600 	if (adev->family >= AMDGPU_FAMILY_RV &&
601 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
602 	    acrtc->dm_irq_params.active_planes == 0) {
603 		if (acrtc->event) {
604 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
605 			acrtc->event = NULL;
606 			drm_crtc_vblank_put(&acrtc->base);
607 		}
608 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
609 	}
610 
611 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
612 }
613 
614 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
615 /**
616  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
617  * DCN generation ASICs
618  * @interrupt_params: interrupt parameters
619  *
620  * Used to set crc window/read out crc value at vertical line 0 position
621  */
622 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
623 {
624 	struct common_irq_params *irq_params = interrupt_params;
625 	struct amdgpu_device *adev = irq_params->adev;
626 	struct amdgpu_crtc *acrtc;
627 
628 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
629 
630 	if (!acrtc)
631 		return;
632 
633 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
634 }
635 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
636 
637 /**
638  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
639  * @adev: amdgpu_device pointer
640  * @notify: dmub notification structure
641  *
642  * Dmub AUX or SET_CONFIG command completion processing callback.
643  * Copies the dmub notification to DM, to be read by the AUX command
644  * issuing thread, and signals the event to wake up that thread.
645  */
646 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
647 					struct dmub_notification *notify)
648 {
649 	if (adev->dm.dmub_notify)
650 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
651 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
652 		complete(&adev->dm.dmub_aux_transfer_done);
653 }
654 
655 /**
656  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
657  * @adev: amdgpu_device pointer
658  * @notify: dmub notification structure
659  *
660  * Dmub Hpd interrupt processing callback. Gets the display connector through
661  * the link index and calls a helper to do the processing.
662  */
663 static void dmub_hpd_callback(struct amdgpu_device *adev,
664 			      struct dmub_notification *notify)
665 {
666 	struct amdgpu_dm_connector *aconnector;
667 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
668 	struct drm_connector *connector;
669 	struct drm_connector_list_iter iter;
670 	struct dc_link *link;
671 	uint8_t link_index = 0;
672 	struct drm_device *dev;
673 
674 	if (adev == NULL)
675 		return;
676 
677 	if (notify == NULL) {
678 		DRM_ERROR("DMUB HPD callback notification was NULL");
679 		return;
680 	}
681 
682 	if (notify->link_index >= adev->dm.dc->link_count) {
683 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
684 		return;
685 	}
686 
687 	link_index = notify->link_index;
688 	link = adev->dm.dc->links[link_index];
689 	dev = adev->dm.ddev;
690 
691 	drm_connector_list_iter_begin(dev, &iter);
692 	drm_for_each_connector_iter(connector, &iter) {
693 		aconnector = to_amdgpu_dm_connector(connector);
694 		if (link && aconnector->dc_link == link) {
695 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
696 			hpd_aconnector = aconnector;
697 			break;
698 		}
699 	}
700 	drm_connector_list_iter_end(&iter);
701 
702 	if (hpd_aconnector) {
703 		if (notify->type == DMUB_NOTIFICATION_HPD)
704 			handle_hpd_irq_helper(hpd_aconnector);
705 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
706 			handle_hpd_rx_irq(hpd_aconnector);
707 	}
708 }
709 
710 /**
711  * register_dmub_notify_callback - Sets callback for DMUB notify
712  * @adev: amdgpu_device pointer
713  * @type: Type of dmub notification
714  * @callback: Dmub interrupt callback function
715  * @dmub_int_thread_offload: offload indicator
716  *
717  * API to register a dmub callback handler for a dmub notification.
718  * Also sets an indicator for whether callback processing is to be offloaded
719  * to the dmub interrupt handling thread.
720  * Return: true if successfully registered, false if there is existing registration
721  */
722 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
723 					  enum dmub_notification_type type,
724 					  dmub_notify_interrupt_callback_t callback,
725 					  bool dmub_int_thread_offload)
726 {
727 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
728 		adev->dm.dmub_callback[type] = callback;
729 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
730 	} else
731 		return false;
732 
733 	return true;
734 }
735 
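/* Deferred work handler that dispatches an offloaded DMUB HPD notification
 * to the callback registered for its notification type.
 */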
736 static void dm_handle_hpd_work(struct work_struct *work)
737 {
738 	struct dmub_hpd_work *dmub_hpd_wrk;
739 
740 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
741 
742 	if (!dmub_hpd_wrk->dmub_notify) {
743 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
744 		return;
745 	}
746 
747 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
748 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
749 		dmub_hpd_wrk->dmub_notify);
750 	}
751 
752 	kfree(dmub_hpd_wrk->dmub_notify);
753 	kfree(dmub_hpd_wrk);
754 
755 }
756 
757 #define DMUB_TRACE_MAX_READ 64
758 /**
759  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
760  * @interrupt_params: used for determining the Outbox instance
761  *
762  * Handles the Outbox interrupt by dispatching DMUB notifications to their
763  * registered callbacks and draining the DMUB trace buffer.
764  */
765 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
766 {
767 	struct dmub_notification notify;
768 	struct common_irq_params *irq_params = interrupt_params;
769 	struct amdgpu_device *adev = irq_params->adev;
770 	struct amdgpu_display_manager *dm = &adev->dm;
771 	struct dmcub_trace_buf_entry entry = { 0 };
772 	uint32_t count = 0;
773 	struct dmub_hpd_work *dmub_hpd_wrk;
774 	struct dc_link *plink = NULL;
775 
776 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
777 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
778 
779 		do {
780 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
781 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
782 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
783 				continue;
784 			}
785 			if (!dm->dmub_callback[notify.type]) {
786 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
787 				continue;
788 			}
789 			if (dm->dmub_thread_offload[notify.type]) {
790 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
791 				if (!dmub_hpd_wrk) {
792 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
793 					return;
794 				}
795 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
796 				if (!dmub_hpd_wrk->dmub_notify) {
797 					kfree(dmub_hpd_wrk);
798 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
799 					return;
800 				}
801 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
802 				if (dmub_hpd_wrk->dmub_notify)
803 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
804 				dmub_hpd_wrk->adev = adev;
805 				if (notify.type == DMUB_NOTIFICATION_HPD) {
806 					plink = adev->dm.dc->links[notify.link_index];
807 					if (plink) {
808 						plink->hpd_status =
809 							notify.hpd_status == DP_HPD_PLUG;
810 					}
811 				}
812 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
813 			} else {
814 				dm->dmub_callback[notify.type](adev, &notify);
815 			}
816 		} while (notify.pending_notification);
817 	}
818 
819 
820 	do {
821 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
822 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
823 							entry.param0, entry.param1);
824 
825 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
826 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
827 		} else
828 			break;
829 
830 		count++;
831 
832 	} while (count <= DMUB_TRACE_MAX_READ);
833 
834 	if (count > DMUB_TRACE_MAX_READ)
835 		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
836 }
837 
838 static int dm_set_clockgating_state(void *handle,
839 		  enum amd_clockgating_state state)
840 {
841 	return 0;
842 }
843 
844 static int dm_set_powergating_state(void *handle,
845 		  enum amd_powergating_state state)
846 {
847 	return 0;
848 }
849 
850 /* Prototypes of private functions */
851 static int dm_early_init(void *handle);
852 
853 /* Allocate memory for FBC compressed data  */
854 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
855 {
856 	struct drm_device *dev = connector->dev;
857 	struct amdgpu_device *adev = drm_to_adev(dev);
858 	struct dm_compressor_info *compressor = &adev->dm.compressor;
859 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
860 	struct drm_display_mode *mode;
861 	unsigned long max_size = 0;
862 
863 	if (adev->dm.dc->fbc_compressor == NULL)
864 		return;
865 
866 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
867 		return;
868 
869 	if (compressor->bo_ptr)
870 		return;
871 
872 
873 	list_for_each_entry(mode, &connector->modes, head) {
874 		if (max_size < mode->htotal * mode->vtotal)
875 			max_size = mode->htotal * mode->vtotal;
876 	}
877 
878 	if (max_size) {
879 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
880 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
881 			    &compressor->gpu_addr, &compressor->cpu_addr);
882 
883 		if (r)
884 			DRM_ERROR("DM: Failed to initialize FBC\n");
885 		else {
886 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
887 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
888 		}
889 
890 	}
891 
892 }
893 
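/* Audio component op: copy the ELD of the connector mapped to the requested
 * audio pin into the caller's buffer and flag whether a match was found.
 */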
894 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
895 					  int pipe, bool *enabled,
896 					  unsigned char *buf, int max_bytes)
897 {
898 	struct drm_device *dev = dev_get_drvdata(kdev);
899 	struct amdgpu_device *adev = drm_to_adev(dev);
900 	struct drm_connector *connector;
901 	struct drm_connector_list_iter conn_iter;
902 	struct amdgpu_dm_connector *aconnector;
903 	int ret = 0;
904 
905 	*enabled = false;
906 
907 	mutex_lock(&adev->dm.audio_lock);
908 
909 	drm_connector_list_iter_begin(dev, &conn_iter);
910 	drm_for_each_connector_iter(connector, &conn_iter) {
911 		aconnector = to_amdgpu_dm_connector(connector);
912 		if (aconnector->audio_inst != port)
913 			continue;
914 
915 		*enabled = true;
916 		ret = drm_eld_size(connector->eld);
917 		memcpy(buf, connector->eld, min(max_bytes, ret));
918 
919 		break;
920 	}
921 	drm_connector_list_iter_end(&conn_iter);
922 
923 	mutex_unlock(&adev->dm.audio_lock);
924 
925 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
926 
927 	return ret;
928 }
929 
930 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
931 	.get_eld = amdgpu_dm_audio_component_get_eld,
932 };
933 
934 static int amdgpu_dm_audio_component_bind(struct device *kdev,
935 				       struct device *hda_kdev, void *data)
936 {
937 	struct drm_device *dev = dev_get_drvdata(kdev);
938 	struct amdgpu_device *adev = drm_to_adev(dev);
939 	struct drm_audio_component *acomp = data;
940 
941 	acomp->ops = &amdgpu_dm_audio_component_ops;
942 	acomp->dev = kdev;
943 	adev->dm.audio_component = acomp;
944 
945 	return 0;
946 }
947 
948 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
949 					  struct device *hda_kdev, void *data)
950 {
951 	struct drm_device *dev = dev_get_drvdata(kdev);
952 	struct amdgpu_device *adev = drm_to_adev(dev);
953 	struct drm_audio_component *acomp = data;
954 
955 	acomp->ops = NULL;
956 	acomp->dev = NULL;
957 	adev->dm.audio_component = NULL;
958 }
959 
960 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
961 	.bind	= amdgpu_dm_audio_component_bind,
962 	.unbind	= amdgpu_dm_audio_component_unbind,
963 };
964 
965 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
966 {
967 	int i, ret;
968 
969 	if (!amdgpu_audio)
970 		return 0;
971 
972 	adev->mode_info.audio.enabled = true;
973 
974 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
975 
976 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
977 		adev->mode_info.audio.pin[i].channels = -1;
978 		adev->mode_info.audio.pin[i].rate = -1;
979 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
980 		adev->mode_info.audio.pin[i].status_bits = 0;
981 		adev->mode_info.audio.pin[i].category_code = 0;
982 		adev->mode_info.audio.pin[i].connected = false;
983 		adev->mode_info.audio.pin[i].id =
984 			adev->dm.dc->res_pool->audios[i]->inst;
985 		adev->mode_info.audio.pin[i].offset = 0;
986 	}
987 
988 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
989 	if (ret < 0)
990 		return ret;
991 
992 	adev->dm.audio_registered = true;
993 
994 	return 0;
995 }
996 
997 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
998 {
999 	if (!amdgpu_audio)
1000 		return;
1001 
1002 	if (!adev->mode_info.audio.enabled)
1003 		return;
1004 
1005 	if (adev->dm.audio_registered) {
1006 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1007 		adev->dm.audio_registered = false;
1008 	}
1009 
1010 	/* TODO: Disable audio? */
1011 
1012 	adev->mode_info.audio.enabled = false;
1013 }
1014 
1015 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1016 {
1017 	struct drm_audio_component *acomp = adev->dm.audio_component;
1018 
1019 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1020 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1021 
1022 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1023 						 pin, -1);
1024 	}
1025 }
1026 
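/* Copy the DMUB firmware and VBIOS into framebuffer memory, initialize the
 * DMUB hardware (and DMCU/ABM when present), then create the DC DMUB server.
 */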
1027 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1028 {
1029 	const struct dmcub_firmware_header_v1_0 *hdr;
1030 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1031 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1032 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1033 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1034 	struct abm *abm = adev->dm.dc->res_pool->abm;
1035 	struct dmub_srv_hw_params hw_params;
1036 	enum dmub_status status;
1037 	const unsigned char *fw_inst_const, *fw_bss_data;
1038 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1039 	bool has_hw_support;
1040 
1041 	if (!dmub_srv)
1042 		/* DMUB isn't supported on the ASIC. */
1043 		return 0;
1044 
1045 	if (!fb_info) {
1046 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1047 		return -EINVAL;
1048 	}
1049 
1050 	if (!dmub_fw) {
1051 		/* Firmware required for DMUB support. */
1052 		DRM_ERROR("No firmware provided for DMUB.\n");
1053 		return -EINVAL;
1054 	}
1055 
1056 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1057 	if (status != DMUB_STATUS_OK) {
1058 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1059 		return -EINVAL;
1060 	}
1061 
1062 	if (!has_hw_support) {
1063 		DRM_INFO("DMUB unsupported on ASIC\n");
1064 		return 0;
1065 	}
1066 
1067 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1068 	status = dmub_srv_hw_reset(dmub_srv);
1069 	if (status != DMUB_STATUS_OK)
1070 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1071 
1072 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1073 
1074 	fw_inst_const = dmub_fw->data +
1075 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1076 			PSP_HEADER_BYTES;
1077 
1078 	fw_bss_data = dmub_fw->data +
1079 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1080 		      le32_to_cpu(hdr->inst_const_bytes);
1081 
1082 	/* Copy firmware and bios info into FB memory. */
1083 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1084 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1085 
1086 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1087 
1088 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1089 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1090 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1091 	 * will be done by dm_dmub_hw_init
1092 	 */
1093 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1094 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1095 				fw_inst_const_size);
1096 	}
1097 
1098 	if (fw_bss_data_size)
1099 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1100 		       fw_bss_data, fw_bss_data_size);
1101 
1102 	/* Copy firmware bios info into FB memory. */
1103 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1104 	       adev->bios_size);
1105 
1106 	/* Reset regions that need to be reset. */
1107 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1108 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1109 
1110 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1111 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1112 
1113 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1114 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1115 
1116 	/* Initialize hardware. */
1117 	memset(&hw_params, 0, sizeof(hw_params));
1118 	hw_params.fb_base = adev->gmc.fb_start;
1119 	hw_params.fb_offset = adev->gmc.aper_base;
1120 
1121 	/* backdoor load firmware and trigger dmub running */
1122 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1123 		hw_params.load_inst_const = true;
1124 
1125 	if (dmcu)
1126 		hw_params.psp_version = dmcu->psp_version;
1127 
1128 	for (i = 0; i < fb_info->num_fb; ++i)
1129 		hw_params.fb[i] = &fb_info->fb[i];
1130 
1131 	switch (adev->ip_versions[DCE_HWIP][0]) {
1132 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1133 		hw_params.dpia_supported = true;
1134 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1135 		break;
1136 	default:
1137 		break;
1138 	}
1139 
1140 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1141 	if (status != DMUB_STATUS_OK) {
1142 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1143 		return -EINVAL;
1144 	}
1145 
1146 	/* Wait for firmware load to finish. */
1147 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1148 	if (status != DMUB_STATUS_OK)
1149 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1150 
1151 	/* Init DMCU and ABM if available. */
1152 	if (dmcu && abm) {
1153 		dmcu->funcs->dmcu_init(dmcu);
1154 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1155 	}
1156 
1157 	if (!adev->dm.dc->ctx->dmub_srv)
1158 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1159 	if (!adev->dm.dc->ctx->dmub_srv) {
1160 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1161 		return -ENOMEM;
1162 	}
1163 
1164 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1165 		 adev->dm.dmcub_fw_version);
1166 
1167 	return 0;
1168 }
1169 
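/* On resume, wait for DMUB firmware auto-load if the hardware is already
 * initialized; otherwise perform a full DMUB hardware init.
 */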
1170 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1171 {
1172 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1173 	enum dmub_status status;
1174 	bool init;
1175 
1176 	if (!dmub_srv) {
1177 		/* DMUB isn't supported on the ASIC. */
1178 		return;
1179 	}
1180 
1181 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1182 	if (status != DMUB_STATUS_OK)
1183 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1184 
1185 	if (status == DMUB_STATUS_OK && init) {
1186 		/* Wait for firmware load to finish. */
1187 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1188 		if (status != DMUB_STATUS_OK)
1189 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1190 	} else {
1191 		/* Perform the full hardware initialization. */
1192 		dm_dmub_hw_init(adev);
1193 	}
1194 }
1195 
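/* Translate the GMC aperture and GART page table configuration into the
 * physical address space config consumed by dc_setup_system_context().
 */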
1196 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1197 {
1198 	uint64_t pt_base;
1199 	uint32_t logical_addr_low;
1200 	uint32_t logical_addr_high;
1201 	uint32_t agp_base, agp_bot, agp_top;
1202 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1203 
1204 	memset(pa_config, 0, sizeof(*pa_config));
1205 
1206 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1207 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1208 
1209 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1210 		/*
1211 		 * Raven2 has a HW issue where it is unable to use the vram which
1212 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1213 		 * workaround that increases the system aperture high address (by 1)
1214 		 * to get rid of the VM fault and hardware hang.
1215 		 */
1216 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1217 	else
1218 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1219 
1220 	agp_base = 0;
1221 	agp_bot = adev->gmc.agp_start >> 24;
1222 	agp_top = adev->gmc.agp_end >> 24;
1223 
1224 
1225 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1226 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1227 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1228 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1229 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1230 	page_table_base.low_part = lower_32_bits(pt_base);
1231 
1232 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1233 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1234 
1235 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1236 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1237 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1238 
1239 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1240 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1241 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1242 
1243 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1244 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1245 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1246 
1247 	pa_config->is_hvm_enabled = 0;
1248 
1249 }
1250 
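/* Deferred vblank enable/disable handling: track the number of CRTCs with
 * vblank interrupts enabled, toggle idle optimizations (MALL) accordingly,
 * and gate PSR entry/exit on the OS's vblank requirements.
 */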
1251 static void vblank_control_worker(struct work_struct *work)
1252 {
1253 	struct vblank_control_work *vblank_work =
1254 		container_of(work, struct vblank_control_work, work);
1255 	struct amdgpu_display_manager *dm = vblank_work->dm;
1256 
1257 	mutex_lock(&dm->dc_lock);
1258 
1259 	if (vblank_work->enable)
1260 		dm->active_vblank_irq_count++;
1261 	else if (dm->active_vblank_irq_count)
1262 		dm->active_vblank_irq_count--;
1263 
1264 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1265 
1266 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1267 
1268 	/*
1269 	 * Control PSR based on vblank requirements from OS
1270 	 *
1271 	 * If panel supports PSR SU, there's no need to disable PSR when OS is
1272 	 * submitting fast atomic commits (we infer this by whether the OS
1273 	 * requests vblank events). Fast atomic commits will simply trigger a
1274 	 * full-frame-update (FFU); a specific case of selective-update (SU)
1275 	 * where the SU region is the full hactive*vactive region. See
1276 	 * fill_dc_dirty_rects().
1277 	 */
1278 	if (vblank_work->stream && vblank_work->stream->link) {
1279 		if (vblank_work->enable) {
1280 			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
1281 			    vblank_work->stream->link->psr_settings.psr_allow_active)
1282 				amdgpu_dm_psr_disable(vblank_work->stream);
1283 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1284 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1285 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1286 			amdgpu_dm_psr_enable(vblank_work->stream);
1287 		}
1288 	}
1289 
1290 	mutex_unlock(&dm->dc_lock);
1291 
1292 	dc_stream_release(vblank_work->stream);
1293 
1294 	kfree(vblank_work);
1295 }
1296 
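/* Offloaded HPD RX handling: re-detect the sink outside of interrupt context
 * and service automated test requests or DP link loss.
 */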
1297 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1298 {
1299 	struct hpd_rx_irq_offload_work *offload_work;
1300 	struct amdgpu_dm_connector *aconnector;
1301 	struct dc_link *dc_link;
1302 	struct amdgpu_device *adev;
1303 	enum dc_connection_type new_connection_type = dc_connection_none;
1304 	unsigned long flags;
1305 
1306 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1307 	aconnector = offload_work->offload_wq->aconnector;
1308 
1309 	if (!aconnector) {
1310 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1311 		goto skip;
1312 	}
1313 
1314 	adev = drm_to_adev(aconnector->base.dev);
1315 	dc_link = aconnector->dc_link;
1316 
1317 	mutex_lock(&aconnector->hpd_lock);
1318 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1319 		DRM_ERROR("KMS: Failed to detect connector\n");
1320 	mutex_unlock(&aconnector->hpd_lock);
1321 
1322 	if (new_connection_type == dc_connection_none)
1323 		goto skip;
1324 
1325 	if (amdgpu_in_reset(adev))
1326 		goto skip;
1327 
1328 	mutex_lock(&adev->dm.dc_lock);
1329 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1330 		dc_link_dp_handle_automated_test(dc_link);
1331 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1332 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1333 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1334 		dc_link_dp_handle_link_loss(dc_link);
1335 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1336 		offload_work->offload_wq->is_handling_link_loss = false;
1337 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1338 	}
1339 	mutex_unlock(&adev->dm.dc_lock);
1340 
1341 skip:
1342 	kfree(offload_work);
1343 
1344 }
1345 
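/* Allocate one single-threaded HPD RX offload workqueue per link. */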
1346 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1347 {
1348 	int max_caps = dc->caps.max_links;
1349 	int i = 0;
1350 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1351 
1352 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1353 
1354 	if (!hpd_rx_offload_wq)
1355 		return NULL;
1356 
1357 
1358 	for (i = 0; i < max_caps; i++) {
1359 		hpd_rx_offload_wq[i].wq =
1360 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1361 
1362 		if (hpd_rx_offload_wq[i].wq == NULL) {
1363 			DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
1364 			return NULL;
1365 		}
1366 
1367 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1368 	}
1369 
1370 	return hpd_rx_offload_wq;
1371 }
1372 
1373 struct amdgpu_stutter_quirk {
1374 	u16 chip_vendor;
1375 	u16 chip_device;
1376 	u16 subsys_vendor;
1377 	u16 subsys_device;
1378 	u8 revision;
1379 };
1380 
1381 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1382 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1383 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1384 	{ 0, 0, 0, 0, 0 },
1385 };
1386 
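/* Check whether the device matches an entry in the stutter quirk list. */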
1387 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1388 {
1389 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1390 
1391 	while (p && p->chip_device != 0) {
1392 		if (pdev->vendor == p->chip_vendor &&
1393 		    pdev->device == p->chip_device &&
1394 		    pdev->subsystem_vendor == p->subsys_vendor &&
1395 		    pdev->subsystem_device == p->subsys_device &&
1396 		    pdev->revision == p->revision) {
1397 			return true;
1398 		}
1399 		++p;
1400 	}
1401 	return false;
1402 }
1403 
1404 static int amdgpu_dm_init(struct amdgpu_device *adev)
1405 {
1406 	struct dc_init_data init_data;
1407 #ifdef CONFIG_DRM_AMD_DC_HDCP
1408 	struct dc_callback_init init_params;
1409 #endif
1410 	int r;
1411 
1412 	adev->dm.ddev = adev_to_drm(adev);
1413 	adev->dm.adev = adev;
1414 
1415 	/* Zero all the fields */
1416 	memset(&init_data, 0, sizeof(init_data));
1417 #ifdef CONFIG_DRM_AMD_DC_HDCP
1418 	memset(&init_params, 0, sizeof(init_params));
1419 #endif
1420 
1421 	mutex_init(&adev->dm.dc_lock);
1422 	mutex_init(&adev->dm.audio_lock);
1423 	spin_lock_init(&adev->dm.vblank_lock);
1424 
1425 	if (amdgpu_dm_irq_init(adev)) {
1426 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1427 		goto error;
1428 	}
1429 
1430 	init_data.asic_id.chip_family = adev->family;
1431 
1432 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1433 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1434 	init_data.asic_id.chip_id = adev->pdev->device;
1435 
1436 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1437 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1438 	init_data.asic_id.atombios_base_address =
1439 		adev->mode_info.atom_context->bios;
1440 
1441 	init_data.driver = adev;
1442 
1443 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1444 
1445 	if (!adev->dm.cgs_device) {
1446 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1447 		goto error;
1448 	}
1449 
1450 	init_data.cgs_device = adev->dm.cgs_device;
1451 
1452 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1453 
1454 	switch (adev->ip_versions[DCE_HWIP][0]) {
1455 	case IP_VERSION(2, 1, 0):
1456 		switch (adev->dm.dmcub_fw_version) {
1457 		case 0: /* development */
1458 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1459 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1460 			init_data.flags.disable_dmcu = false;
1461 			break;
1462 		default:
1463 			init_data.flags.disable_dmcu = true;
1464 		}
1465 		break;
1466 	case IP_VERSION(2, 0, 3):
1467 		init_data.flags.disable_dmcu = true;
1468 		break;
1469 	default:
1470 		break;
1471 	}
1472 
1473 	switch (adev->asic_type) {
1474 	case CHIP_CARRIZO:
1475 	case CHIP_STONEY:
1476 		init_data.flags.gpu_vm_support = true;
1477 		break;
1478 	default:
1479 		switch (adev->ip_versions[DCE_HWIP][0]) {
1480 		case IP_VERSION(1, 0, 0):
1481 		case IP_VERSION(1, 0, 1):
1482 			/* enable S/G on PCO and RV2 */
1483 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1484 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1485 				init_data.flags.gpu_vm_support = true;
1486 			break;
1487 		case IP_VERSION(2, 1, 0):
1488 		case IP_VERSION(3, 0, 1):
1489 		case IP_VERSION(3, 1, 2):
1490 		case IP_VERSION(3, 1, 3):
1491 		case IP_VERSION(3, 1, 5):
1492 		case IP_VERSION(3, 1, 6):
1493 			init_data.flags.gpu_vm_support = true;
1494 			break;
1495 		default:
1496 			break;
1497 		}
1498 		break;
1499 	}
1500 
1501 	if (init_data.flags.gpu_vm_support)
1502 		adev->mode_info.gpu_vm_support = true;
1503 
1504 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1505 		init_data.flags.fbc_support = true;
1506 
1507 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1508 		init_data.flags.multi_mon_pp_mclk_switch = true;
1509 
1510 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1511 		init_data.flags.disable_fractional_pwm = true;
1512 
1513 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1514 		init_data.flags.edp_no_power_sequencing = true;
1515 
1516 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1517 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1518 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1519 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1520 
1521 	init_data.flags.seamless_boot_edp_requested = false;
1522 
1523 	if (check_seamless_boot_capability(adev)) {
1524 		init_data.flags.seamless_boot_edp_requested = true;
1525 		init_data.flags.allow_seamless_boot_optimization = true;
1526 		DRM_INFO("Seamless boot condition check passed\n");
1527 	}
1528 
1529 	init_data.flags.enable_mipi_converter_optimization = true;
1530 
1531 	INIT_LIST_HEAD(&adev->dm.da_list);
1532 	/* Display Core create. */
1533 	adev->dm.dc = dc_create(&init_data);
1534 
1535 	if (adev->dm.dc) {
1536 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1537 	} else {
1538 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1539 		goto error;
1540 	}
1541 
1542 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1543 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1544 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1545 	}
1546 
1547 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1548 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1549 	if (dm_should_disable_stutter(adev->pdev))
1550 		adev->dm.dc->debug.disable_stutter = true;
1551 
1552 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1553 		adev->dm.dc->debug.disable_stutter = true;
1554 
1555 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1556 		adev->dm.dc->debug.disable_dsc = true;
1557 		adev->dm.dc->debug.disable_dsc_edp = true;
1558 	}
1559 
1560 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1561 		adev->dm.dc->debug.disable_clock_gate = true;
1562 
1563 	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1564 		adev->dm.dc->debug.force_subvp_mclk_switch = true;
1565 
1566 	r = dm_dmub_hw_init(adev);
1567 	if (r) {
1568 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1569 		goto error;
1570 	}
1571 
1572 	dc_hardware_init(adev->dm.dc);
1573 
1574 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1575 	if (!adev->dm.hpd_rx_offload_wq) {
1576 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1577 		goto error;
1578 	}
1579 
1580 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1581 		struct dc_phy_addr_space_config pa_config;
1582 
1583 		mmhub_read_system_context(adev, &pa_config);
1584 
1585 		// Call the DC init_memory func
1586 		dc_setup_system_context(adev->dm.dc, &pa_config);
1587 	}
1588 
1589 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1590 	if (!adev->dm.freesync_module) {
1591 		DRM_ERROR(
1592 		"amdgpu: failed to initialize freesync_module.\n");
1593 	} else
1594 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1595 				adev->dm.freesync_module);
1596 
1597 	amdgpu_dm_init_color_mod();
1598 
1599 	if (adev->dm.dc->caps.max_links > 0) {
1600 		adev->dm.vblank_control_workqueue =
1601 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1602 		if (!adev->dm.vblank_control_workqueue)
1603 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1604 	}
1605 
1606 #ifdef CONFIG_DRM_AMD_DC_HDCP
1607 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1608 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1609 
1610 		if (!adev->dm.hdcp_workqueue)
1611 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1612 		else
1613 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1614 
1615 		dc_init_callbacks(adev->dm.dc, &init_params);
1616 	}
1617 #endif
1618 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1619 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1620 #endif
1621 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1622 		init_completion(&adev->dm.dmub_aux_transfer_done);
1623 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1624 		if (!adev->dm.dmub_notify) {
1625 			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1626 			goto error;
1627 		}
1628 
1629 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1630 		if (!adev->dm.delayed_hpd_wq) {
1631 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1632 			goto error;
1633 		}
1634 
1635 		amdgpu_dm_outbox_init(adev);
1636 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1637 			dmub_aux_setconfig_callback, false)) {
1638 			DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1639 			goto error;
1640 		}
1641 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1642 			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1643 			goto error;
1644 		}
1645 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1646 			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1647 			goto error;
1648 		}
1649 	}
1650 
1651 	if (amdgpu_dm_initialize_drm_device(adev)) {
1652 		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1654 		goto error;
1655 	}
1656 
1657 	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1658 	 * It is expected that DMUB will resend any pending notifications at this point, for
1659 	 * example HPD from DPIA.
1660 	 */
1661 	if (dc_is_dmub_outbox_supported(adev->dm.dc))
1662 		dc_enable_dmub_outbox(adev->dm.dc);
1663 
1664 	/* create fake encoders for MST */
1665 	dm_dp_create_fake_mst_encoders(adev);
1666 
1667 	/* TODO: Add_display_info? */
1668 
1669 	/* TODO use dynamic cursor width */
1670 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1671 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1672 
1673 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1674 		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1676 		goto error;
1677 	}
1678 
1680 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1681 
1682 	return 0;
1683 error:
1684 	amdgpu_dm_fini(adev);
1685 
1686 	return -EINVAL;
1687 }
1688 
1689 static int amdgpu_dm_early_fini(void *handle)
1690 {
1691 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1692 
1693 	amdgpu_dm_audio_fini(adev);
1694 
1695 	return 0;
1696 }
1697 
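/*
 * Tear down the display manager state created during init: workqueues,
 * fake MST encoders, the DRM device state, HDCP, DMUB and DC resources.
 */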
1698 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1699 {
1700 	int i;
1701 
1702 	if (adev->dm.vblank_control_workqueue) {
1703 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1704 		adev->dm.vblank_control_workqueue = NULL;
1705 	}
1706 
1707 	for (i = 0; i < adev->dm.display_indexes_num; i++)
1708 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1710 
1711 	amdgpu_dm_destroy_drm_device(&adev->dm);
1712 
1713 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1714 	if (adev->dm.crc_rd_wrk) {
1715 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1716 		kfree(adev->dm.crc_rd_wrk);
1717 		adev->dm.crc_rd_wrk = NULL;
1718 	}
1719 #endif
1720 #ifdef CONFIG_DRM_AMD_DC_HDCP
1721 	if (adev->dm.hdcp_workqueue) {
1722 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1723 		adev->dm.hdcp_workqueue = NULL;
1724 	}
1725 
1726 	if (adev->dm.dc)
1727 		dc_deinit_callbacks(adev->dm.dc);
1728 #endif
1729 
1730 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1731 
1732 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1733 		kfree(adev->dm.dmub_notify);
1734 		adev->dm.dmub_notify = NULL;
1735 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1736 		adev->dm.delayed_hpd_wq = NULL;
1737 	}
1738 
1739 	if (adev->dm.dmub_bo)
1740 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1741 				      &adev->dm.dmub_bo_gpu_addr,
1742 				      &adev->dm.dmub_bo_cpu_addr);
1743 
1744 	if (adev->dm.hpd_rx_offload_wq) {
1745 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1746 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1747 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1748 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1749 			}
1750 		}
1751 
1752 		kfree(adev->dm.hpd_rx_offload_wq);
1753 		adev->dm.hpd_rx_offload_wq = NULL;
1754 	}
1755 
1756 	/* DC Destroy TODO: Replace destroy DAL */
1757 	if (adev->dm.dc)
1758 		dc_destroy(&adev->dm.dc);
1759 	/*
1760 	 * TODO: pageflip, vblank interrupt
1761 	 *
1762 	 * amdgpu_dm_irq_fini(adev);
1763 	 */
1764 
1765 	if (adev->dm.cgs_device) {
1766 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1767 		adev->dm.cgs_device = NULL;
1768 	}
1769 	if (adev->dm.freesync_module) {
1770 		mod_freesync_destroy(adev->dm.freesync_module);
1771 		adev->dm.freesync_module = NULL;
1772 	}
1773 
1774 	mutex_destroy(&adev->dm.audio_lock);
1775 	mutex_destroy(&adev->dm.dc_lock);
1778 }
1779 
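/*
 * Request and validate the DMCU firmware for ASICs that need it, and register
 * its ERAM and INTV sections for loading through PSP.
 */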
1780 static int load_dmcu_fw(struct amdgpu_device *adev)
1781 {
1782 	const char *fw_name_dmcu = NULL;
1783 	int r;
1784 	const struct dmcu_firmware_header_v1_0 *hdr;
1785 
1786 	switch (adev->asic_type) {
1787 #if defined(CONFIG_DRM_AMD_DC_SI)
1788 	case CHIP_TAHITI:
1789 	case CHIP_PITCAIRN:
1790 	case CHIP_VERDE:
1791 	case CHIP_OLAND:
1792 #endif
1793 	case CHIP_BONAIRE:
1794 	case CHIP_HAWAII:
1795 	case CHIP_KAVERI:
1796 	case CHIP_KABINI:
1797 	case CHIP_MULLINS:
1798 	case CHIP_TONGA:
1799 	case CHIP_FIJI:
1800 	case CHIP_CARRIZO:
1801 	case CHIP_STONEY:
1802 	case CHIP_POLARIS11:
1803 	case CHIP_POLARIS10:
1804 	case CHIP_POLARIS12:
1805 	case CHIP_VEGAM:
1806 	case CHIP_VEGA10:
1807 	case CHIP_VEGA12:
1808 	case CHIP_VEGA20:
1809 		return 0;
1810 	case CHIP_NAVI12:
1811 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1812 		break;
1813 	case CHIP_RAVEN:
1814 		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1815 		    ASICREV_IS_RAVEN2(adev->external_rev_id))
1816 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1818 		else
1819 			return 0;
1820 		break;
1821 	default:
1822 		switch (adev->ip_versions[DCE_HWIP][0]) {
1823 		case IP_VERSION(2, 0, 2):
1824 		case IP_VERSION(2, 0, 3):
1825 		case IP_VERSION(2, 0, 0):
1826 		case IP_VERSION(2, 1, 0):
1827 		case IP_VERSION(3, 0, 0):
1828 		case IP_VERSION(3, 0, 2):
1829 		case IP_VERSION(3, 0, 3):
1830 		case IP_VERSION(3, 0, 1):
1831 		case IP_VERSION(3, 1, 2):
1832 		case IP_VERSION(3, 1, 3):
1833 		case IP_VERSION(3, 1, 5):
1834 		case IP_VERSION(3, 1, 6):
1835 		case IP_VERSION(3, 2, 0):
1836 		case IP_VERSION(3, 2, 1):
1837 			return 0;
1838 		default:
1839 			break;
1840 		}
1841 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1842 		return -EINVAL;
1843 	}
1844 
1845 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1846 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1847 		return 0;
1848 	}
1849 
1850 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1851 	if (r == -ENOENT) {
1852 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1853 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1854 		adev->dm.fw_dmcu = NULL;
1855 		return 0;
1856 	}
1857 	if (r) {
1858 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1859 			fw_name_dmcu);
1860 		return r;
1861 	}
1862 
1863 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1864 	if (r) {
1865 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1866 			fw_name_dmcu);
1867 		release_firmware(adev->dm.fw_dmcu);
1868 		adev->dm.fw_dmcu = NULL;
1869 		return r;
1870 	}
1871 
1872 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1873 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1874 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1875 	adev->firmware.fw_size +=
1876 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1877 
1878 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1879 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1880 	adev->firmware.fw_size +=
1881 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1882 
1883 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1884 
1885 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1886 
1887 	return 0;
1888 }
1889 
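/* Register access callbacks handed to the DMUB service via dmub_srv_create_params. */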
1890 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1891 {
1892 	struct amdgpu_device *adev = ctx;
1893 
1894 	return dm_read_reg(adev->dm.dc->ctx, address);
1895 }
1896 
1897 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1898 				     uint32_t value)
1899 {
1900 	struct amdgpu_device *adev = ctx;
1901 
1902 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1903 }
1904 
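/*
 * Pick and load the DMUB firmware for this DCN IP version, create the DMUB
 * service, and reserve the VRAM framebuffer that backs its memory regions.
 */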
1905 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1906 {
1907 	struct dmub_srv_create_params create_params;
1908 	struct dmub_srv_region_params region_params;
1909 	struct dmub_srv_region_info region_info;
1910 	struct dmub_srv_fb_params fb_params;
1911 	struct dmub_srv_fb_info *fb_info;
1912 	struct dmub_srv *dmub_srv;
1913 	const struct dmcub_firmware_header_v1_0 *hdr;
1914 	const char *fw_name_dmub;
1915 	enum dmub_asic dmub_asic;
1916 	enum dmub_status status;
1917 	int r;
1918 
1919 	switch (adev->ip_versions[DCE_HWIP][0]) {
1920 	case IP_VERSION(2, 1, 0):
1921 		dmub_asic = DMUB_ASIC_DCN21;
1922 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1923 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1924 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1925 		break;
1926 	case IP_VERSION(3, 0, 0):
1927 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1928 			dmub_asic = DMUB_ASIC_DCN30;
1929 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1930 		} else {
1931 			dmub_asic = DMUB_ASIC_DCN30;
1932 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1933 		}
1934 		break;
1935 	case IP_VERSION(3, 0, 1):
1936 		dmub_asic = DMUB_ASIC_DCN301;
1937 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1938 		break;
1939 	case IP_VERSION(3, 0, 2):
1940 		dmub_asic = DMUB_ASIC_DCN302;
1941 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1942 		break;
1943 	case IP_VERSION(3, 0, 3):
1944 		dmub_asic = DMUB_ASIC_DCN303;
1945 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1946 		break;
1947 	case IP_VERSION(3, 1, 2):
1948 	case IP_VERSION(3, 1, 3):
1949 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1950 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1951 		break;
1952 	case IP_VERSION(3, 1, 5):
1953 		dmub_asic = DMUB_ASIC_DCN315;
1954 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1955 		break;
1956 	case IP_VERSION(3, 1, 6):
1957 		dmub_asic = DMUB_ASIC_DCN316;
1958 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1959 		break;
1960 	case IP_VERSION(3, 2, 0):
1961 		dmub_asic = DMUB_ASIC_DCN32;
1962 		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
1963 		break;
1964 	case IP_VERSION(3, 2, 1):
1965 		dmub_asic = DMUB_ASIC_DCN321;
1966 		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
1967 		break;
1968 	default:
1969 		/* ASIC doesn't support DMUB. */
1970 		return 0;
1971 	}
1972 
1973 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1974 	if (r) {
1975 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1976 		return 0;
1977 	}
1978 
1979 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1980 	if (r) {
1981 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1982 		return 0;
1983 	}
1984 
1985 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1986 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1987 
1988 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1989 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1990 			AMDGPU_UCODE_ID_DMCUB;
1991 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1992 			adev->dm.dmub_fw;
1993 		adev->firmware.fw_size +=
1994 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1995 
1996 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1997 			 adev->dm.dmcub_fw_version);
1998 	}
1999 
2001 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2002 	dmub_srv = adev->dm.dmub_srv;
2003 
2004 	if (!dmub_srv) {
2005 		DRM_ERROR("Failed to allocate DMUB service!\n");
2006 		return -ENOMEM;
2007 	}
2008 
2009 	memset(&create_params, 0, sizeof(create_params));
2010 	create_params.user_ctx = adev;
2011 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2012 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2013 	create_params.asic = dmub_asic;
2014 
2015 	/* Create the DMUB service. */
2016 	status = dmub_srv_create(dmub_srv, &create_params);
2017 	if (status != DMUB_STATUS_OK) {
2018 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2019 		return -EINVAL;
2020 	}
2021 
2022 	/* Calculate the size of all the regions for the DMUB service. */
2023 	memset(&region_params, 0, sizeof(region_params));
2024 
2025 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2026 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2027 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2028 	region_params.vbios_size = adev->bios_size;
2029 	region_params.fw_bss_data = region_params.bss_data_size ?
2030 		adev->dm.dmub_fw->data +
2031 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2032 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2033 	region_params.fw_inst_const =
2034 		adev->dm.dmub_fw->data +
2035 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2036 		PSP_HEADER_BYTES;
2037 
2038 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2039 					   &region_info);
2040 
2041 	if (status != DMUB_STATUS_OK) {
2042 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2043 		return -EINVAL;
2044 	}
2045 
2046 	/*
2047 	 * Allocate a framebuffer based on the total size of all the regions.
2048 	 * TODO: Move this into GART.
2049 	 */
2050 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2051 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2052 				    &adev->dm.dmub_bo_gpu_addr,
2053 				    &adev->dm.dmub_bo_cpu_addr);
2054 	if (r)
2055 		return r;
2056 
2057 	/* Rebase the regions on the framebuffer address. */
2058 	memset(&fb_params, 0, sizeof(fb_params));
2059 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2060 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2061 	fb_params.region_info = &region_info;
2062 
2063 	adev->dm.dmub_fb_info =
2064 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2065 	fb_info = adev->dm.dmub_fb_info;
2066 
2067 	if (!fb_info) {
2068 		DRM_ERROR(
2069 			"Failed to allocate framebuffer info for DMUB service!\n");
2070 		return -ENOMEM;
2071 	}
2072 
2073 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2074 	if (status != DMUB_STATUS_OK) {
2075 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2076 		return -EINVAL;
2077 	}
2078 
2079 	return 0;
2080 }
2081 
2082 static int dm_sw_init(void *handle)
2083 {
2084 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2085 	int r;
2086 
2087 	r = dm_dmub_sw_init(adev);
2088 	if (r)
2089 		return r;
2090 
2091 	return load_dmcu_fw(adev);
2092 }
2093 
2094 static int dm_sw_fini(void *handle)
2095 {
2096 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2097 
2098 	kfree(adev->dm.dmub_fb_info);
2099 	adev->dm.dmub_fb_info = NULL;
2100 
2101 	if (adev->dm.dmub_srv) {
2102 		dmub_srv_destroy(adev->dm.dmub_srv);
2103 		adev->dm.dmub_srv = NULL;
2104 	}
2105 
2106 	release_firmware(adev->dm.dmub_fw);
2107 	adev->dm.dmub_fw = NULL;
2108 
2109 	release_firmware(adev->dm.fw_dmcu);
2110 	adev->dm.fw_dmcu = NULL;
2111 
2112 	return 0;
2113 }
2114 
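/* Start topology management on every connector backed by an MST branch device. */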
2115 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2116 {
2117 	struct amdgpu_dm_connector *aconnector;
2118 	struct drm_connector *connector;
2119 	struct drm_connector_list_iter iter;
2120 	int ret = 0;
2121 
2122 	drm_connector_list_iter_begin(dev, &iter);
2123 	drm_for_each_connector_iter(connector, &iter) {
2124 		aconnector = to_amdgpu_dm_connector(connector);
2125 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2126 		    aconnector->mst_mgr.aux) {
2127 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2128 					 aconnector,
2129 					 aconnector->base.base.id);
2130 
2131 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2132 			if (ret < 0) {
2133 				DRM_ERROR("DM_MST: Failed to start MST\n");
2134 				aconnector->dc_link->type =
2135 					dc_connection_single;
2136 				break;
2137 			}
2138 		}
2139 	}
2140 	drm_connector_list_iter_end(&iter);
2141 
2142 	return ret;
2143 }
2144 
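/*
 * Program the ABM/backlight IRAM parameters (into DMCU or DMCUB, whichever is
 * present) and then kick off MST link detection for all connectors.
 */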
2145 static int dm_late_init(void *handle)
2146 {
2147 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2148 
2149 	struct dmcu_iram_parameters params;
2150 	unsigned int linear_lut[16];
2151 	int i;
2152 	struct dmcu *dmcu = NULL;
2153 
2154 	dmcu = adev->dm.dc->res_pool->dmcu;
2155 
2156 	for (i = 0; i < 16; i++)
2157 		linear_lut[i] = 0xFFFF * i / 15;
2158 
2159 	params.set = 0;
2160 	params.backlight_ramping_override = false;
2161 	params.backlight_ramping_start = 0xCCCC;
2162 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2163 	params.backlight_lut_array_size = 16;
2164 	params.backlight_lut_array = linear_lut;
2165 
2166 	/* Min backlight level after ABM reduction; don't allow below 1%.
2167 	 * 0xFFFF x 0.01 = 0x28F
2168 	 */
2169 	params.min_abm_backlight = 0x28F;
2170 	/* In the case where ABM is implemented on dmcub,
2171 	 * the dmcu object will be null.
2172 	 * ABM 2.4 and up are implemented on dmcub.
2173 	 */
2174 	if (dmcu) {
2175 		if (!dmcu_load_iram(dmcu, params))
2176 			return -EINVAL;
2177 	} else if (adev->dm.dc->ctx->dmub_srv) {
2178 		struct dc_link *edp_links[MAX_NUM_EDP];
2179 		int edp_num;
2180 
2181 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2182 		for (i = 0; i < edp_num; i++) {
2183 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2184 				return -EINVAL;
2185 		}
2186 	}
2187 
2188 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2189 }
2190 
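/*
 * Suspend or resume the MST topology managers across S3; if a manager fails
 * to resume, stop it and send a hotplug event so userspace re-probes.
 */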
2191 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2192 {
2193 	struct amdgpu_dm_connector *aconnector;
2194 	struct drm_connector *connector;
2195 	struct drm_connector_list_iter iter;
2196 	struct drm_dp_mst_topology_mgr *mgr;
2197 	int ret;
2198 	bool need_hotplug = false;
2199 
2200 	drm_connector_list_iter_begin(dev, &iter);
2201 	drm_for_each_connector_iter(connector, &iter) {
2202 		aconnector = to_amdgpu_dm_connector(connector);
2203 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2204 		    aconnector->mst_port)
2205 			continue;
2206 
2207 		mgr = &aconnector->mst_mgr;
2208 
2209 		if (suspend) {
2210 			drm_dp_mst_topology_mgr_suspend(mgr);
2211 		} else {
2212 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2213 			if (ret < 0) {
2214 				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2215 					aconnector->dc_link);
2216 				need_hotplug = true;
2217 			}
2218 		}
2219 	}
2220 	drm_connector_list_iter_end(&iter);
2221 
2222 	if (need_hotplug)
2223 		drm_kms_helper_hotplug_event(dev);
2224 }
2225 
2226 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2227 {
2228 	int ret = 0;
2229 
2230 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2231 	 * on the Windows driver dc implementation.
2232 	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2233 	 * should be passed to smu during boot up and resume from s3.
2234 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2235 	 * dcn20_resource_construct
2236 	 * then call pplib functions below to pass the settings to smu:
2237 	 * smu_set_watermarks_for_clock_ranges
2238 	 * smu_set_watermarks_table
2239 	 * navi10_set_watermarks_table
2240 	 * smu_write_watermarks_table
2241 	 *
2242 	 * For Renoir, clock settings of dcn watermark are also fixed values.
2243 	 * dc has implemented different flow for window driver:
2244 	 * dc_hardware_init / dc_set_power_state
2245 	 * dcn10_init_hw
2246 	 * notify_wm_ranges
2247 	 * set_wm_ranges
2248 	 * -- Linux
2249 	 * smu_set_watermarks_for_clock_ranges
2250 	 * renoir_set_watermarks_table
2251 	 * smu_write_watermarks_table
2252 	 *
2253 	 * For Linux,
2254 	 * dc_hardware_init -> amdgpu_dm_init
2255 	 * dc_set_power_state --> dm_resume
2256 	 *
2257 	 * Therefore, this function applies to Navi10/12/14 but not Renoir.
2259 	 */
2260 	switch (adev->ip_versions[DCE_HWIP][0]) {
2261 	case IP_VERSION(2, 0, 2):
2262 	case IP_VERSION(2, 0, 0):
2263 		break;
2264 	default:
2265 		return 0;
2266 	}
2267 
2268 	ret = amdgpu_dpm_write_watermarks_table(adev);
2269 	if (ret) {
2270 		DRM_ERROR("Failed to update WMTABLE!\n");
2271 		return ret;
2272 	}
2273 
2274 	return 0;
2275 }
2276 
2277 /**
2278  * dm_hw_init() - Initialize DC device
2279  * @handle: The base driver device containing the amdgpu_dm device.
2280  *
2281  * Initialize the &struct amdgpu_display_manager device. This involves calling
2282  * the initializers of each DM component, then populating the struct with them.
2283  *
2284  * Although the function implies hardware initialization, both hardware and
2285  * software are initialized here. Splitting them out to their relevant init
2286  * hooks is a future TODO item.
2287  *
2288  * Some notable things that are initialized here:
2289  *
2290  * - Display Core, both software and hardware
2291  * - DC modules that we need (freesync and color management)
2292  * - DRM software states
2293  * - Interrupt sources and handlers
2294  * - Vblank support
2295  * - Debug FS entries, if enabled
2296  */
2297 static int dm_hw_init(void *handle)
2298 {
2299 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2300 	/* Create DAL display manager */
2301 	amdgpu_dm_init(adev);
2302 	amdgpu_dm_hpd_init(adev);
2303 
2304 	return 0;
2305 }
2306 
2307 /**
2308  * dm_hw_fini() - Teardown DC device
2309  * @handle: The base driver device containing the amdgpu_dm device.
2310  *
2311  * Teardown components within &struct amdgpu_display_manager that require
2312  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2313  * were loaded. Also flush IRQ workqueues and disable them.
2314  */
2315 static int dm_hw_fini(void *handle)
2316 {
2317 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2318 
2319 	amdgpu_dm_hpd_fini(adev);
2320 
2321 	amdgpu_dm_irq_fini(adev);
2322 	amdgpu_dm_fini(adev);
2323 	return 0;
2324 }
2325 
2326 
2327 static int dm_enable_vblank(struct drm_crtc *crtc);
2328 static void dm_disable_vblank(struct drm_crtc *crtc);
2329 
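/* Enable or disable pflip and vblank interrupts for each stream in @state that has planes. */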
2330 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2331 				 struct dc_state *state, bool enable)
2332 {
2333 	enum dc_irq_source irq_source;
2334 	struct amdgpu_crtc *acrtc;
2335 	int rc = -EBUSY;
2336 	int i = 0;
2337 
2338 	for (i = 0; i < state->stream_count; i++) {
2339 		acrtc = get_crtc_by_otg_inst(
2340 				adev, state->stream_status[i].primary_otg_inst);
2341 
2342 		if (acrtc && state->stream_status[i].plane_count != 0) {
2343 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2344 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2345 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2346 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2347 			if (rc)
2348 				DRM_WARN("Failed to %s pflip interrupts\n",
2349 					 enable ? "enable" : "disable");
2350 
2351 			if (enable) {
2352 				rc = dm_enable_vblank(&acrtc->base);
2353 				if (rc)
2354 					DRM_WARN("Failed to enable vblank interrupts\n");
2355 			} else {
2356 				dm_disable_vblank(&acrtc->base);
2357 			}
2358 
2359 		}
2360 	}
2361 
2362 }
2363 
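/*
 * Build a copy of the current DC state with all planes and streams removed
 * and commit it, leaving the hardware with zero active streams.
 */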
2364 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2365 {
2366 	struct dc_state *context = NULL;
2367 	enum dc_status res = DC_ERROR_UNEXPECTED;
2368 	int i;
2369 	struct dc_stream_state *del_streams[MAX_PIPES];
2370 	int del_streams_count = 0;
2371 
2372 	memset(del_streams, 0, sizeof(del_streams));
2373 
2374 	context = dc_create_state(dc);
2375 	if (context == NULL)
2376 		goto context_alloc_fail;
2377 
2378 	dc_resource_state_copy_construct_current(dc, context);
2379 
2380 	/* First remove from context all streams */
2381 	for (i = 0; i < context->stream_count; i++) {
2382 		struct dc_stream_state *stream = context->streams[i];
2383 
2384 		del_streams[del_streams_count++] = stream;
2385 	}
2386 
2387 	/* Remove all planes for removed streams and then remove the streams */
2388 	for (i = 0; i < del_streams_count; i++) {
2389 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2390 			res = DC_FAIL_DETACH_SURFACES;
2391 			goto fail;
2392 		}
2393 
2394 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2395 		if (res != DC_OK)
2396 			goto fail;
2397 	}
2398 
2399 	res = dc_commit_state(dc, context);
2400 
2401 fail:
2402 	dc_release_state(context);
2403 
2404 context_alloc_fail:
2405 	return res;
2406 }
2407 
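/* Flush any pending HPD RX offload work for every link before suspending. */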
2408 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2409 {
2410 	int i;
2411 
2412 	if (dm->hpd_rx_offload_wq) {
2413 		for (i = 0; i < dm->dc->caps.max_links; i++)
2414 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2415 	}
2416 }
2417 
2418 static int dm_suspend(void *handle)
2419 {
2420 	struct amdgpu_device *adev = handle;
2421 	struct amdgpu_display_manager *dm = &adev->dm;
2422 	int ret = 0;
2423 
2424 	if (amdgpu_in_reset(adev)) {
2425 		mutex_lock(&dm->dc_lock);
2426 
2427 		dc_allow_idle_optimizations(adev->dm.dc, false);
2428 
2429 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2430 
2431 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2432 
2433 		amdgpu_dm_commit_zero_streams(dm->dc);
2434 
2435 		amdgpu_dm_irq_suspend(adev);
2436 
2437 		hpd_rx_irq_work_suspend(dm);
2438 
2439 		return ret;
2440 	}
2441 
2442 	WARN_ON(adev->dm.cached_state);
2443 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2444 
2445 	s3_handle_mst(adev_to_drm(adev), true);
2446 
2447 	amdgpu_dm_irq_suspend(adev);
2448 
2449 	hpd_rx_irq_work_suspend(dm);
2450 
2451 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2452 
2453 	return 0;
2454 }
2455 
2456 struct amdgpu_dm_connector *
2457 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2458 					     struct drm_crtc *crtc)
2459 {
2460 	uint32_t i;
2461 	struct drm_connector_state *new_con_state;
2462 	struct drm_connector *connector;
2463 	struct drm_crtc *crtc_from_state;
2464 
2465 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2466 		crtc_from_state = new_con_state->crtc;
2467 
2468 		if (crtc_from_state == crtc)
2469 			return to_amdgpu_dm_connector(connector);
2470 	}
2471 
2472 	return NULL;
2473 }
2474 
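/*
 * Fake a link detection for a forced connector: create a sink matching the
 * connector signal type and try to read the EDID locally.
 */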
2475 static void emulated_link_detect(struct dc_link *link)
2476 {
2477 	struct dc_sink_init_data sink_init_data = { 0 };
2478 	struct display_sink_capability sink_caps = { 0 };
2479 	enum dc_edid_status edid_status;
2480 	struct dc_context *dc_ctx = link->ctx;
2481 	struct dc_sink *sink = NULL;
2482 	struct dc_sink *prev_sink = NULL;
2483 
2484 	link->type = dc_connection_none;
2485 	prev_sink = link->local_sink;
2486 
2487 	if (prev_sink)
2488 		dc_sink_release(prev_sink);
2489 
2490 	switch (link->connector_signal) {
2491 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2492 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2493 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2494 		break;
2495 	}
2496 
2497 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2498 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2499 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2500 		break;
2501 	}
2502 
2503 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2504 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2505 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2506 		break;
2507 	}
2508 
2509 	case SIGNAL_TYPE_LVDS: {
2510 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2511 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2512 		break;
2513 	}
2514 
2515 	case SIGNAL_TYPE_EDP: {
2516 		sink_caps.transaction_type =
2517 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2518 		sink_caps.signal = SIGNAL_TYPE_EDP;
2519 		break;
2520 	}
2521 
2522 	case SIGNAL_TYPE_DISPLAY_PORT: {
2523 		sink_caps.transaction_type =
2524 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2525 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2526 		break;
2527 	}
2528 
2529 	default:
2530 		DC_ERROR("Invalid connector type! signal:%d\n",
2531 			link->connector_signal);
2532 		return;
2533 	}
2534 
2535 	sink_init_data.link = link;
2536 	sink_init_data.sink_signal = sink_caps.signal;
2537 
2538 	sink = dc_sink_create(&sink_init_data);
2539 	if (!sink) {
2540 		DC_ERROR("Failed to create sink!\n");
2541 		return;
2542 	}
2543 
2544 	/* dc_sink_create returns a new reference */
2545 	link->local_sink = sink;
2546 
2547 	edid_status = dm_helpers_read_local_edid(
2548 			link->ctx,
2549 			link,
2550 			sink);
2551 
2552 	if (edid_status != EDID_OK)
2553 		DC_ERROR("Failed to read EDID\n");
2555 }
2556 
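/*
 * After a GPU reset, force a full update on every plane of the cached DC
 * state and commit the streams back to DC.
 */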
2557 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2558 				     struct amdgpu_display_manager *dm)
2559 {
2560 	struct {
2561 		struct dc_surface_update surface_updates[MAX_SURFACES];
2562 		struct dc_plane_info plane_infos[MAX_SURFACES];
2563 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2564 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2565 		struct dc_stream_update stream_update;
2566 	} *bundle;
2567 	int k, m;
2568 
2569 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2570 
2571 	if (!bundle) {
2572 		dm_error("Failed to allocate update bundle\n");
2573 		goto cleanup;
2574 	}
2575 
2576 	for (k = 0; k < dc_state->stream_count; k++) {
2577 		bundle->stream_update.stream = dc_state->streams[k];
2578 
2579 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2580 			bundle->surface_updates[m].surface =
2581 				dc_state->stream_status->plane_states[m];
2582 			bundle->surface_updates[m].surface->force_full_update =
2583 				true;
2584 		}
2585 		dc_commit_updates_for_stream(
2586 			dm->dc, bundle->surface_updates,
2587 			dc_state->stream_status->plane_count,
2588 			dc_state->streams[k], &bundle->stream_update, dc_state);
2589 	}
2590 
2591 cleanup:
2592 	kfree(bundle);
2595 }
2596 
2597 static int dm_resume(void *handle)
2598 {
2599 	struct amdgpu_device *adev = handle;
2600 	struct drm_device *ddev = adev_to_drm(adev);
2601 	struct amdgpu_display_manager *dm = &adev->dm;
2602 	struct amdgpu_dm_connector *aconnector;
2603 	struct drm_connector *connector;
2604 	struct drm_connector_list_iter iter;
2605 	struct drm_crtc *crtc;
2606 	struct drm_crtc_state *new_crtc_state;
2607 	struct dm_crtc_state *dm_new_crtc_state;
2608 	struct drm_plane *plane;
2609 	struct drm_plane_state *new_plane_state;
2610 	struct dm_plane_state *dm_new_plane_state;
2611 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2612 	enum dc_connection_type new_connection_type = dc_connection_none;
2613 	struct dc_state *dc_state;
2614 	int i, r, j;
2615 
2616 	if (amdgpu_in_reset(adev)) {
2617 		dc_state = dm->cached_dc_state;
2618 
2619 		/*
2620 		 * The dc->current_state is backed up into dm->cached_dc_state
2621 		 * before we commit 0 streams.
2622 		 *
2623 		 * DC will clear link encoder assignments on the real state
2624 		 * but the changes won't propagate over to the copy we made
2625 		 * before the 0 streams commit.
2626 		 *
2627 		 * DC expects that link encoder assignments are *not* valid
2628 		 * when committing a state, so as a workaround we can copy
2629 		 * off of the current state.
2630 		 *
2631 		 * We lose the previous assignments, but we had already
2632 		 * commit 0 streams anyway.
2633 		 */
2634 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2635 
2636 		r = dm_dmub_hw_init(adev);
2637 		if (r)
2638 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2639 
2640 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2641 		dc_resume(dm->dc);
2642 
2643 		amdgpu_dm_irq_resume_early(adev);
2644 
2645 		for (i = 0; i < dc_state->stream_count; i++) {
2646 			dc_state->streams[i]->mode_changed = true;
2647 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2648 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2649 					= 0xffffffff;
2650 			}
2651 		}
2652 
2653 		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2654 			amdgpu_dm_outbox_init(adev);
2655 			dc_enable_dmub_outbox(adev->dm.dc);
2656 		}
2657 
2658 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2659 
2660 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2661 
2662 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2663 
2664 		dc_release_state(dm->cached_dc_state);
2665 		dm->cached_dc_state = NULL;
2666 
2667 		amdgpu_dm_irq_resume_late(adev);
2668 
2669 		mutex_unlock(&dm->dc_lock);
2670 
2671 		return 0;
2672 	}
2673 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2674 	dc_release_state(dm_state->context);
2675 	dm_state->context = dc_create_state(dm->dc);
2676 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2677 	dc_resource_state_construct(dm->dc, dm_state->context);
2678 
2679 	/* Before powering on DC we need to re-initialize DMUB. */
2680 	dm_dmub_hw_resume(adev);
2681 
2682 	/* Re-enable outbox interrupts for DPIA. */
2683 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2684 		amdgpu_dm_outbox_init(adev);
2685 		dc_enable_dmub_outbox(adev->dm.dc);
2686 	}
2687 
2688 	/* power on hardware */
2689 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2690 
2691 	/* program HPD filter */
2692 	dc_resume(dm->dc);
2693 
2694 	/*
2695 	 * early enable HPD Rx IRQ, should be done before set mode as short
2696 	 * pulse interrupts are used for MST
2697 	 */
2698 	amdgpu_dm_irq_resume_early(adev);
2699 
2700 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2701 	s3_handle_mst(ddev, false);
2702 
2703 	/* Do detection */
2704 	drm_connector_list_iter_begin(ddev, &iter);
2705 	drm_for_each_connector_iter(connector, &iter) {
2706 		aconnector = to_amdgpu_dm_connector(connector);
2707 
2708 		/*
2709 		 * this is the case when traversing through already created
2710 		 * MST connectors, should be skipped
2711 		 */
2712 		if (aconnector->dc_link &&
2713 		    aconnector->dc_link->type == dc_connection_mst_branch)
2714 			continue;
2715 
2716 		mutex_lock(&aconnector->hpd_lock);
2717 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2718 			DRM_ERROR("KMS: Failed to detect connector\n");
2719 
2720 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2721 			emulated_link_detect(aconnector->dc_link);
2722 		else
2723 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2724 
2725 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2726 			aconnector->fake_enable = false;
2727 
2728 		if (aconnector->dc_sink)
2729 			dc_sink_release(aconnector->dc_sink);
2730 		aconnector->dc_sink = NULL;
2731 		amdgpu_dm_update_connector_after_detect(aconnector);
2732 		mutex_unlock(&aconnector->hpd_lock);
2733 	}
2734 	drm_connector_list_iter_end(&iter);
2735 
2736 	/* Force mode set in atomic commit */
2737 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2738 		new_crtc_state->active_changed = true;
2739 
2740 	/*
2741 	 * atomic_check is expected to create the dc states. We need to release
2742 	 * them here, since they were duplicated as part of the suspend
2743 	 * procedure.
2744 	 */
2745 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2746 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2747 		if (dm_new_crtc_state->stream) {
2748 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2749 			dc_stream_release(dm_new_crtc_state->stream);
2750 			dm_new_crtc_state->stream = NULL;
2751 		}
2752 	}
2753 
2754 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2755 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2756 		if (dm_new_plane_state->dc_state) {
2757 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2758 			dc_plane_state_release(dm_new_plane_state->dc_state);
2759 			dm_new_plane_state->dc_state = NULL;
2760 		}
2761 	}
2762 
2763 	drm_atomic_helper_resume(ddev, dm->cached_state);
2764 
2765 	dm->cached_state = NULL;
2766 
2767 	amdgpu_dm_irq_resume_late(adev);
2768 
2769 	amdgpu_dm_smu_write_watermarks_table(adev);
2770 
2771 	return 0;
2772 }
2773 
2774 /**
2775  * DOC: DM Lifecycle
2776  *
2777  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2778  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2779  * the base driver's device list to be initialized and torn down accordingly.
2780  *
2781  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2782  */
2783 
2784 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2785 	.name = "dm",
2786 	.early_init = dm_early_init,
2787 	.late_init = dm_late_init,
2788 	.sw_init = dm_sw_init,
2789 	.sw_fini = dm_sw_fini,
2790 	.early_fini = amdgpu_dm_early_fini,
2791 	.hw_init = dm_hw_init,
2792 	.hw_fini = dm_hw_fini,
2793 	.suspend = dm_suspend,
2794 	.resume = dm_resume,
2795 	.is_idle = dm_is_idle,
2796 	.wait_for_idle = dm_wait_for_idle,
2797 	.check_soft_reset = dm_check_soft_reset,
2798 	.soft_reset = dm_soft_reset,
2799 	.set_clockgating_state = dm_set_clockgating_state,
2800 	.set_powergating_state = dm_set_powergating_state,
2801 };
2802 
2803 const struct amdgpu_ip_block_version dm_ip_block =
2804 {
2805 	.type = AMD_IP_BLOCK_TYPE_DCE,
2806 	.major = 1,
2807 	.minor = 0,
2808 	.rev = 0,
2809 	.funcs = &amdgpu_dm_funcs,
2810 };
2811 
2812 
2813 /**
2814  * DOC: atomic
2815  *
2816  * *WIP*
2817  */
2818 
2819 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2820 	.fb_create = amdgpu_display_user_framebuffer_create,
2821 	.get_format_info = amd_get_format_info,
2822 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2823 	.atomic_check = amdgpu_dm_atomic_check,
2824 	.atomic_commit = drm_atomic_helper_commit,
2825 };
2826 
2827 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2828 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2829 };
2830 
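/*
 * Derive the eDP backlight AUX capabilities and the min/max input signal for
 * a connector from its DPCD extended caps and HDR sink metadata.
 */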
2831 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2832 {
2833 	u32 max_avg, min_cll, max, min, q, r;
2834 	struct amdgpu_dm_backlight_caps *caps;
2835 	struct amdgpu_display_manager *dm;
2836 	struct drm_connector *conn_base;
2837 	struct amdgpu_device *adev;
2838 	struct dc_link *link = NULL;
2839 	static const u8 pre_computed_values[] = {
2840 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2841 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2842 	int i;
2843 
2844 	if (!aconnector || !aconnector->dc_link)
2845 		return;
2846 
2847 	link = aconnector->dc_link;
2848 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2849 		return;
2850 
2851 	conn_base = &aconnector->base;
2852 	adev = drm_to_adev(conn_base->dev);
2853 	dm = &adev->dm;
2854 	for (i = 0; i < dm->num_of_edps; i++) {
2855 		if (link == dm->backlight_link[i])
2856 			break;
2857 	}
2858 	if (i >= dm->num_of_edps)
2859 		return;
2860 	caps = &dm->backlight_caps[i];
2861 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2862 	caps->aux_support = false;
2863 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2864 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2865 
2866 	if (caps->ext_caps->bits.oled == 1 /*||
2867 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2868 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2869 		caps->aux_support = true;
2870 
2871 	if (amdgpu_backlight == 0)
2872 		caps->aux_support = false;
2873 	else if (amdgpu_backlight == 1)
2874 		caps->aux_support = true;
2875 
2876 	/* From the specification (CTA-861-G), for calculating the maximum
2877 	 * luminance we need to use:
2878 	 *	Luminance = 50*2**(CV/32)
2879 	 * Where CV is a one-byte value.
2880 	 * For calculating this expression we may need floating point precision;
2881 	 * to avoid this complexity level, we take advantage that CV is divided
2882 	 * by a constant. From Euclid's division algorithm, we know that CV
2883 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2884 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2885 	 * need to pre-compute the value of r/32. For pre-computing the values
2886 	 * we just used the following Ruby line:
2887 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2888 	 * The results of the above expressions can be verified at
2889 	 * pre_computed_values.
2890 	 */
2891 	q = max_avg >> 5;
2892 	r = max_avg % 32;
2893 	max = (1 << q) * pre_computed_values[r];
2894 
2895 	// min luminance: maxLum * (CV/255)^2 / 100
2896 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2897 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2898 
2899 	caps->aux_max_input_signal = max;
2900 	caps->aux_min_input_signal = min;
2901 }
2902 
2903 void amdgpu_dm_update_connector_after_detect(
2904 		struct amdgpu_dm_connector *aconnector)
2905 {
2906 	struct drm_connector *connector = &aconnector->base;
2907 	struct drm_device *dev = connector->dev;
2908 	struct dc_sink *sink;
2909 
2910 	/* MST handled by drm_mst framework */
2911 	if (aconnector->mst_mgr.mst_state)
2912 		return;
2913 
2914 	sink = aconnector->dc_link->local_sink;
2915 	if (sink)
2916 		dc_sink_retain(sink);
2917 
2918 	/*
2919 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2920 	 * the connector sink is set to either fake or physical sink depending on link status.
2921 	 * Skip if already done during boot.
2922 	 */
2923 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2924 			&& aconnector->dc_em_sink) {
2925 
2926 		/*
2927 		 * For S3 resume with headless, use the emulated dc_em_sink to fake a stream
2928 		 * because on resume connector->sink is set to NULL
2929 		 */
2930 		mutex_lock(&dev->mode_config.mutex);
2931 
2932 		if (sink) {
2933 			if (aconnector->dc_sink) {
2934 				amdgpu_dm_update_freesync_caps(connector, NULL);
2935 				/*
2936 				 * The retain and release below bump up the sink's
2937 				 * refcount because the link no longer points to it
2938 				 * after disconnect; otherwise the next crtc-to-connector
2939 				 * reshuffle by UMD would trigger an unwanted dc_sink release.
2940 				 */
2941 				dc_sink_release(aconnector->dc_sink);
2942 			}
2943 			aconnector->dc_sink = sink;
2944 			dc_sink_retain(aconnector->dc_sink);
2945 			amdgpu_dm_update_freesync_caps(connector,
2946 					aconnector->edid);
2947 		} else {
2948 			amdgpu_dm_update_freesync_caps(connector, NULL);
2949 			if (!aconnector->dc_sink) {
2950 				aconnector->dc_sink = aconnector->dc_em_sink;
2951 				dc_sink_retain(aconnector->dc_sink);
2952 			}
2953 		}
2954 
2955 		mutex_unlock(&dev->mode_config.mutex);
2956 
2957 		if (sink)
2958 			dc_sink_release(sink);
2959 		return;
2960 	}
2961 
2962 	/*
2963 	 * TODO: temporary guard to look for proper fix
2964 	 * if this sink is MST sink, we should not do anything
2965 	 */
2966 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2967 		dc_sink_release(sink);
2968 		return;
2969 	}
2970 
2971 	if (aconnector->dc_sink == sink) {
2972 		/*
2973 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2974 		 * Do nothing!!
2975 		 */
2976 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2977 				aconnector->connector_id);
2978 		if (sink)
2979 			dc_sink_release(sink);
2980 		return;
2981 	}
2982 
2983 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2984 		aconnector->connector_id, aconnector->dc_sink, sink);
2985 
2986 	mutex_lock(&dev->mode_config.mutex);
2987 
2988 	/*
2989 	 * 1. Update status of the drm connector
2990 	 * 2. Send an event and let userspace tell us what to do
2991 	 */
2992 	if (sink) {
2993 		/*
2994 		 * TODO: check if we still need the S3 mode update workaround.
2995 		 * If yes, put it here.
2996 		 */
2997 		if (aconnector->dc_sink) {
2998 			amdgpu_dm_update_freesync_caps(connector, NULL);
2999 			dc_sink_release(aconnector->dc_sink);
3000 		}
3001 
3002 		aconnector->dc_sink = sink;
3003 		dc_sink_retain(aconnector->dc_sink);
3004 		if (sink->dc_edid.length == 0) {
3005 			aconnector->edid = NULL;
3006 			if (aconnector->dc_link->aux_mode) {
3007 				drm_dp_cec_unset_edid(
3008 					&aconnector->dm_dp_aux.aux);
3009 			}
3010 		} else {
3011 			aconnector->edid =
3012 				(struct edid *)sink->dc_edid.raw_edid;
3013 
3014 			if (aconnector->dc_link->aux_mode)
3015 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3016 						    aconnector->edid);
3017 		}
3018 
3019 		drm_connector_update_edid_property(connector, aconnector->edid);
3020 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3021 		update_connector_ext_caps(aconnector);
3022 	} else {
3023 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3024 		amdgpu_dm_update_freesync_caps(connector, NULL);
3025 		drm_connector_update_edid_property(connector, NULL);
3026 		aconnector->num_modes = 0;
3027 		dc_sink_release(aconnector->dc_sink);
3028 		aconnector->dc_sink = NULL;
3029 		aconnector->edid = NULL;
3030 #ifdef CONFIG_DRM_AMD_DC_HDCP
3031 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3032 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3033 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3034 #endif
3035 	}
3036 
3037 	mutex_unlock(&dev->mode_config.mutex);
3038 
3039 	update_subconnector_property(aconnector);
3040 
3041 	if (sink)
3042 		dc_sink_release(sink);
3043 }
3044 
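/*
 * Handle an HPD interrupt on a single connector: reset HDCP for the link,
 * re-run detection, restore the DRM connector state and send a hotplug event
 * when appropriate.
 */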
3045 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3046 {
3047 	struct drm_connector *connector = &aconnector->base;
3048 	struct drm_device *dev = connector->dev;
3049 	enum dc_connection_type new_connection_type = dc_connection_none;
3050 	struct amdgpu_device *adev = drm_to_adev(dev);
3051 #ifdef CONFIG_DRM_AMD_DC_HDCP
3052 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3053 #endif
3054 
3055 	if (adev->dm.disable_hpd_irq)
3056 		return;
3057 
3058 	/*
3059 	 * In case of failure or MST, there is no need to update the connector status
3060 	 * or notify the OS since (for the MST case) MST does this in its own context.
3061 	 */
3062 	mutex_lock(&aconnector->hpd_lock);
3063 
3064 #ifdef CONFIG_DRM_AMD_DC_HDCP
3065 	if (adev->dm.hdcp_workqueue) {
3066 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3067 		dm_con_state->update_hdcp = true;
3068 	}
3069 #endif
3070 	if (aconnector->fake_enable)
3071 		aconnector->fake_enable = false;
3072 
3073 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3074 		DRM_ERROR("KMS: Failed to detect connector\n");
3075 
3076 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3077 		emulated_link_detect(aconnector->dc_link);
3078 
3079 		drm_modeset_lock_all(dev);
3080 		dm_restore_drm_connector_state(dev, connector);
3081 		drm_modeset_unlock_all(dev);
3082 
3083 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3084 			drm_kms_helper_connector_hotplug_event(connector);
3085 
3086 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3087 		amdgpu_dm_update_connector_after_detect(aconnector);
3088 
3089 		drm_modeset_lock_all(dev);
3090 		dm_restore_drm_connector_state(dev, connector);
3091 		drm_modeset_unlock_all(dev);
3092 
3093 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3094 			drm_kms_helper_connector_hotplug_event(connector);
3095 	}
3096 	mutex_unlock(&aconnector->hpd_lock);
3097 
3098 }
3099 
3100 static void handle_hpd_irq(void *param)
3101 {
3102 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3103 
3104 	handle_hpd_irq_helper(aconnector);
3105 
3106 }
3107 
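/*
 * Drain MST sideband messages: read the sink-count/ESI DPCD range, let the
 * MST manager handle the IRQ, then ACK it, repeating until nothing is left.
 */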
3108 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3109 {
3110 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3111 	uint8_t dret;
3112 	bool new_irq_handled = false;
3113 	int dpcd_addr;
3114 	int dpcd_bytes_to_read;
3115 
3116 	const int max_process_count = 30;
3117 	int process_count = 0;
3118 
3119 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3120 
3121 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3122 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3123 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3124 		dpcd_addr = DP_SINK_COUNT;
3125 	} else {
3126 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3127 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3128 		dpcd_addr = DP_SINK_COUNT_ESI;
3129 	}
3130 
3131 	dret = drm_dp_dpcd_read(
3132 		&aconnector->dm_dp_aux.aux,
3133 		dpcd_addr,
3134 		esi,
3135 		dpcd_bytes_to_read);
3136 
3137 	while (dret == dpcd_bytes_to_read &&
3138 		process_count < max_process_count) {
3139 		uint8_t retry;
3140 		dret = 0;
3141 
3142 		process_count++;
3143 
3144 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3145 		/* handle HPD short pulse irq */
3146 		if (aconnector->mst_mgr.mst_state)
3147 			drm_dp_mst_hpd_irq(
3148 				&aconnector->mst_mgr,
3149 				esi,
3150 				&new_irq_handled);
3151 
3152 		if (new_irq_handled) {
3153 			/* ACK at DPCD to notify downstream */
3154 			const int ack_dpcd_bytes_to_write =
3155 				dpcd_bytes_to_read - 1;
3156 
3157 			for (retry = 0; retry < 3; retry++) {
3158 				uint8_t wret;
3159 
3160 				wret = drm_dp_dpcd_write(
3161 					&aconnector->dm_dp_aux.aux,
3162 					dpcd_addr + 1,
3163 					&esi[1],
3164 					ack_dpcd_bytes_to_write);
3165 				if (wret == ack_dpcd_bytes_to_write)
3166 					break;
3167 			}
3168 
3169 			/* check if there is new irq to be handled */
3170 			dret = drm_dp_dpcd_read(
3171 				&aconnector->dm_dp_aux.aux,
3172 				dpcd_addr,
3173 				esi,
3174 				dpcd_bytes_to_read);
3175 
3176 			new_irq_handled = false;
3177 		} else {
3178 			break;
3179 		}
3180 	}
3181 
3182 	if (process_count == max_process_count)
3183 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3184 }
3185 
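/* Queue HPD RX IRQ data onto the per-link offload workqueue for deferred handling. */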
3186 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3187 							union hpd_irq_data hpd_irq_data)
3188 {
3189 	struct hpd_rx_irq_offload_work *offload_work =
3190 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3191 
3192 	if (!offload_work) {
3193 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3194 		return;
3195 	}
3196 
3197 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3198 	offload_work->data = hpd_irq_data;
3199 	offload_work->offload_wq = offload_wq;
3200 
3201 	queue_work(offload_wq->wq, &offload_work->work);
3202 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3203 }
3204 
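/*
 * Handle a DP short-pulse (HPD RX) interrupt: service MST sideband messages,
 * offload automated-test and link-loss handling to the workqueue, and re-run
 * sink detection when the downstream port status changed.
 */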
3205 static void handle_hpd_rx_irq(void *param)
3206 {
3207 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3208 	struct drm_connector *connector = &aconnector->base;
3209 	struct drm_device *dev = connector->dev;
3210 	struct dc_link *dc_link = aconnector->dc_link;
3211 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3212 	bool result = false;
3213 	enum dc_connection_type new_connection_type = dc_connection_none;
3214 	struct amdgpu_device *adev = drm_to_adev(dev);
3215 	union hpd_irq_data hpd_irq_data;
3216 	bool link_loss = false;
3217 	bool has_left_work = false;
3218 	int idx = aconnector->base.index;
3219 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3220 
3221 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3222 
3223 	if (adev->dm.disable_hpd_irq)
3224 		return;
3225 
3226 	/*
3227 	 * TODO: Temporarily add a mutex so the hpd interrupt does not race on the
3228 	 * gpio; once the i2c helper is implemented, this mutex should be
3229 	 * retired.
3230 	 */
3231 	mutex_lock(&aconnector->hpd_lock);
3232 
3233 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3234 						&link_loss, true, &has_left_work);
3235 
3236 	if (!has_left_work)
3237 		goto out;
3238 
3239 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3240 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3241 		goto out;
3242 	}
3243 
3244 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3245 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3246 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3247 			dm_handle_mst_sideband_msg(aconnector);
3248 			goto out;
3249 		}
3250 
3251 		if (link_loss) {
3252 			bool skip = false;
3253 
3254 			spin_lock(&offload_wq->offload_lock);
3255 			skip = offload_wq->is_handling_link_loss;
3256 
3257 			if (!skip)
3258 				offload_wq->is_handling_link_loss = true;
3259 
3260 			spin_unlock(&offload_wq->offload_lock);
3261 
3262 			if (!skip)
3263 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3264 
3265 			goto out;
3266 		}
3267 	}
3268 
3269 out:
3270 	if (result && !is_mst_root_connector) {
3271 		/* Downstream Port status changed. */
3272 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3273 			DRM_ERROR("KMS: Failed to detect connector\n");
3274 
3275 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3276 			emulated_link_detect(dc_link);
3277 
3278 			if (aconnector->fake_enable)
3279 				aconnector->fake_enable = false;
3280 
3281 			amdgpu_dm_update_connector_after_detect(aconnector);
3282 
3284 			drm_modeset_lock_all(dev);
3285 			dm_restore_drm_connector_state(dev, connector);
3286 			drm_modeset_unlock_all(dev);
3287 
3288 			drm_kms_helper_connector_hotplug_event(connector);
3289 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3290 
3291 			if (aconnector->fake_enable)
3292 				aconnector->fake_enable = false;
3293 
3294 			amdgpu_dm_update_connector_after_detect(aconnector);
3295 
3297 			drm_modeset_lock_all(dev);
3298 			dm_restore_drm_connector_state(dev, connector);
3299 			drm_modeset_unlock_all(dev);
3300 
3301 			drm_kms_helper_connector_hotplug_event(connector);
3302 		}
3303 	}
3304 #ifdef CONFIG_DRM_AMD_DC_HDCP
3305 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3306 		if (adev->dm.hdcp_workqueue)
3307 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3308 	}
3309 #endif
3310 
3311 	if (dc_link->type != dc_connection_mst_branch)
3312 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3313 
3314 	mutex_unlock(&aconnector->hpd_lock);
3315 }
3316 
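/* Register the HPD and HPD RX interrupt handlers for every connector's link. */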
3317 static void register_hpd_handlers(struct amdgpu_device *adev)
3318 {
3319 	struct drm_device *dev = adev_to_drm(adev);
3320 	struct drm_connector *connector;
3321 	struct amdgpu_dm_connector *aconnector;
3322 	const struct dc_link *dc_link;
3323 	struct dc_interrupt_params int_params = {0};
3324 
3325 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3326 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3327 
3328 	list_for_each_entry(connector,
3329 			&dev->mode_config.connector_list, head) {
3330 
3331 		aconnector = to_amdgpu_dm_connector(connector);
3332 		dc_link = aconnector->dc_link;
3333 
3334 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3335 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3336 			int_params.irq_source = dc_link->irq_source_hpd;
3337 
3338 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3339 					handle_hpd_irq,
3340 					(void *) aconnector);
3341 		}
3342 
3343 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3344 
3345 			/* Also register for DP short pulse (hpd_rx). */
3346 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3347 			int_params.irq_source = dc_link->irq_source_hpd_rx;
3348 
3349 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3350 					handle_hpd_rx_irq,
3351 					(void *) aconnector);
3352 
3353 			if (adev->dm.hpd_rx_offload_wq)
3354 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3355 					aconnector;
3356 		}
3357 	}
3358 }
3359 
3360 #if defined(CONFIG_DRM_AMD_DC_SI)
3361 /* Register IRQ sources and initialize IRQ callbacks */
3362 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3363 {
3364 	struct dc *dc = adev->dm.dc;
3365 	struct common_irq_params *c_irq_params;
3366 	struct dc_interrupt_params int_params = {0};
3367 	int r;
3368 	int i;
3369 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3370 
3371 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3372 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3373 
3374 	/*
3375 	 * Actions of amdgpu_irq_add_id():
3376 	 * 1. Register a set() function with base driver.
3377 	 *    Base driver will call set() function to enable/disable an
3378 	 *    interrupt in DC hardware.
3379 	 * 2. Register amdgpu_dm_irq_handler().
3380 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3381 	 *    coming from DC hardware.
3382 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3383 	 *    for acknowledging and handling.
	 */
3384 
3385 	/* Use VBLANK interrupt */
3386 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3387 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3388 		if (r) {
3389 			DRM_ERROR("Failed to add crtc irq id!\n");
3390 			return r;
3391 		}
3392 
3393 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3394 		int_params.irq_source =
3395 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3396 
3397 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3398 
3399 		c_irq_params->adev = adev;
3400 		c_irq_params->irq_src = int_params.irq_source;
3401 
3402 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3403 				dm_crtc_high_irq, c_irq_params);
3404 	}
3405 
3406 	/* Use GRPH_PFLIP interrupt */
3407 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3408 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3409 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3410 		if (r) {
3411 			DRM_ERROR("Failed to add page flip irq id!\n");
3412 			return r;
3413 		}
3414 
3415 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3416 		int_params.irq_source =
3417 			dc_interrupt_to_irq_source(dc, i, 0);
3418 
3419 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3420 
3421 		c_irq_params->adev = adev;
3422 		c_irq_params->irq_src = int_params.irq_source;
3423 
3424 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3425 				dm_pflip_high_irq, c_irq_params);
3427 	}
3428 
3429 	/* HPD */
3430 	r = amdgpu_irq_add_id(adev, client_id,
3431 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3432 	if (r) {
3433 		DRM_ERROR("Failed to add hpd irq id!\n");
3434 		return r;
3435 	}
3436 
3437 	register_hpd_handlers(adev);
3438 
3439 	return 0;
3440 }
3441 #endif
3442 
3443 /* Register IRQ sources and initialize IRQ callbacks */
3444 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3445 {
3446 	struct dc *dc = adev->dm.dc;
3447 	struct common_irq_params *c_irq_params;
3448 	struct dc_interrupt_params int_params = {0};
3449 	int r;
3450 	int i;
3451 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3452 
3453 	if (adev->family >= AMDGPU_FAMILY_AI)
3454 		client_id = SOC15_IH_CLIENTID_DCE;
3455 
3456 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3457 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3458 
3459 	/*
3460 	 * Actions of amdgpu_irq_add_id():
3461 	 * 1. Register a set() function with base driver.
3462 	 *    Base driver will call set() function to enable/disable an
3463 	 *    interrupt in DC hardware.
3464 	 * 2. Register amdgpu_dm_irq_handler().
3465 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3466 	 *    coming from DC hardware.
3467 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3468 	 *    for acknowledging and handling.
	 */
3469 
3470 	/* Use VBLANK interrupt */
3471 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3472 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3473 		if (r) {
3474 			DRM_ERROR("Failed to add crtc irq id!\n");
3475 			return r;
3476 		}
3477 
3478 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3479 		int_params.irq_source =
3480 			dc_interrupt_to_irq_source(dc, i, 0);
3481 
3482 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3483 
3484 		c_irq_params->adev = adev;
3485 		c_irq_params->irq_src = int_params.irq_source;
3486 
3487 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3488 				dm_crtc_high_irq, c_irq_params);
3489 	}
3490 
3491 	/* Use VUPDATE interrupt */
3492 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3493 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3494 		if (r) {
3495 			DRM_ERROR("Failed to add vupdate irq id!\n");
3496 			return r;
3497 		}
3498 
3499 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3500 		int_params.irq_source =
3501 			dc_interrupt_to_irq_source(dc, i, 0);
3502 
3503 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3504 
3505 		c_irq_params->adev = adev;
3506 		c_irq_params->irq_src = int_params.irq_source;
3507 
3508 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3509 				dm_vupdate_high_irq, c_irq_params);
3510 	}
3511 
3512 	/* Use GRPH_PFLIP interrupt */
3513 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3514 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3515 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3516 		if (r) {
3517 			DRM_ERROR("Failed to add page flip irq id!\n");
3518 			return r;
3519 		}
3520 
3521 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3522 		int_params.irq_source =
3523 			dc_interrupt_to_irq_source(dc, i, 0);
3524 
3525 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3526 
3527 		c_irq_params->adev = adev;
3528 		c_irq_params->irq_src = int_params.irq_source;
3529 
3530 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3531 				dm_pflip_high_irq, c_irq_params);
3533 	}
3534 
3535 	/* HPD */
3536 	r = amdgpu_irq_add_id(adev, client_id,
3537 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3538 	if (r) {
3539 		DRM_ERROR("Failed to add hpd irq id!\n");
3540 		return r;
3541 	}
3542 
3543 	register_hpd_handlers(adev);
3544 
3545 	return 0;
3546 }
3547 
3548 /* Register IRQ sources and initialize IRQ callbacks */
3549 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3550 {
3551 	struct dc *dc = adev->dm.dc;
3552 	struct common_irq_params *c_irq_params;
3553 	struct dc_interrupt_params int_params = {0};
3554 	int r;
3555 	int i;
3556 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3557 	static const unsigned int vrtl_int_srcid[] = {
3558 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3559 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3560 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3561 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3562 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3563 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3564 	};
3565 #endif
3566 
3567 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3568 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3569 
3570 	/*
3571 	 * Actions of amdgpu_irq_add_id():
3572 	 * 1. Register a set() function with base driver.
3573 	 *    Base driver will call set() function to enable/disable an
3574 	 *    interrupt in DC hardware.
3575 	 * 2. Register amdgpu_dm_irq_handler().
3576 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3577 	 *    coming from DC hardware.
3578 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3579 	 *    for acknowledging and handling.
3580 	 */
3581 
3582 	/* Use VSTARTUP interrupt */
3583 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3584 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3585 			i++) {
3586 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3587 
3588 		if (r) {
3589 			DRM_ERROR("Failed to add crtc irq id!\n");
3590 			return r;
3591 		}
3592 
3593 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3594 		int_params.irq_source =
3595 			dc_interrupt_to_irq_source(dc, i, 0);
3596 
3597 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3598 
3599 		c_irq_params->adev = adev;
3600 		c_irq_params->irq_src = int_params.irq_source;
3601 
3602 		amdgpu_dm_irq_register_interrupt(
3603 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3604 	}
3605 
3606 	/* Use otg vertical line interrupt */
3607 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3608 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3609 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3610 				vrtl_int_srcid[i], &adev->vline0_irq);
3611 
3612 		if (r) {
3613 			DRM_ERROR("Failed to add vline0 irq id!\n");
3614 			return r;
3615 		}
3616 
3617 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3618 		int_params.irq_source =
3619 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3620 
3621 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3622 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3623 			break;
3624 		}
3625 
3626 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3627 					- DC_IRQ_SOURCE_DC1_VLINE0];
3628 
3629 		c_irq_params->adev = adev;
3630 		c_irq_params->irq_src = int_params.irq_source;
3631 
3632 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3633 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3634 	}
3635 #endif
3636 
3637 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3638 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3639 	 * to trigger at end of each vblank, regardless of state of the lock,
3640 	 * matching DCE behaviour.
3641 	 */
3642 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3643 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3644 	     i++) {
3645 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3646 
3647 		if (r) {
3648 			DRM_ERROR("Failed to add vupdate irq id!\n");
3649 			return r;
3650 		}
3651 
3652 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3653 		int_params.irq_source =
3654 			dc_interrupt_to_irq_source(dc, i, 0);
3655 
3656 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3657 
3658 		c_irq_params->adev = adev;
3659 		c_irq_params->irq_src = int_params.irq_source;
3660 
3661 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3662 				dm_vupdate_high_irq, c_irq_params);
3663 	}
3664 
3665 	/* Use GRPH_PFLIP interrupt */
3666 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3667 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3668 			i++) {
3669 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3670 		if (r) {
3671 			DRM_ERROR("Failed to add page flip irq id!\n");
3672 			return r;
3673 		}
3674 
3675 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3676 		int_params.irq_source =
3677 			dc_interrupt_to_irq_source(dc, i, 0);
3678 
3679 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3680 
3681 		c_irq_params->adev = adev;
3682 		c_irq_params->irq_src = int_params.irq_source;
3683 
3684 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3685 				dm_pflip_high_irq, c_irq_params);
3687 	}
3688 
3689 	/* HPD */
3690 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3691 			&adev->hpd_irq);
3692 	if (r) {
3693 		DRM_ERROR("Failed to add hpd irq id!\n");
3694 		return r;
3695 	}
3696 
3697 	register_hpd_handlers(adev);
3698 
3699 	return 0;
3700 }
3701 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3702 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3703 {
3704 	struct dc *dc = adev->dm.dc;
3705 	struct common_irq_params *c_irq_params;
3706 	struct dc_interrupt_params int_params = {0};
3707 	int r, i;
3708 
3709 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3710 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3711 
3712 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3713 			&adev->dmub_outbox_irq);
3714 	if (r) {
3715 		DRM_ERROR("Failed to add outbox irq id!\n");
3716 		return r;
3717 	}
3718 
3719 	if (dc->ctx->dmub_srv) {
3720 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3721 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3722 		int_params.irq_source =
3723 			dc_interrupt_to_irq_source(dc, i, 0);
3724 
3725 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3726 
3727 		c_irq_params->adev = adev;
3728 		c_irq_params->irq_src = int_params.irq_source;
3729 
3730 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3731 				dm_dmub_outbox1_low_irq, c_irq_params);
3732 	}
3733 
3734 	return 0;
3735 }
3736 
3737 /*
3738  * Acquires the lock for the atomic state object and returns
3739  * the new atomic state.
3740  *
3741  * This should only be called during atomic check.
3742  */
3743 int dm_atomic_get_state(struct drm_atomic_state *state,
3744 			struct dm_atomic_state **dm_state)
3745 {
3746 	struct drm_device *dev = state->dev;
3747 	struct amdgpu_device *adev = drm_to_adev(dev);
3748 	struct amdgpu_display_manager *dm = &adev->dm;
3749 	struct drm_private_state *priv_state;
3750 
3751 	if (*dm_state)
3752 		return 0;
3753 
3754 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3755 	if (IS_ERR(priv_state))
3756 		return PTR_ERR(priv_state);
3757 
3758 	*dm_state = to_dm_atomic_state(priv_state);
3759 
3760 	return 0;
3761 }
3762 
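/*
 * Return the new DM private state from an already-populated atomic state,
 * or NULL if the DM private object is not part of this commit. Unlike
 * dm_atomic_get_state() this does not take locks or add the object to the
 * state.
 */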
3763 static struct dm_atomic_state *
3764 dm_atomic_get_new_state(struct drm_atomic_state *state)
3765 {
3766 	struct drm_device *dev = state->dev;
3767 	struct amdgpu_device *adev = drm_to_adev(dev);
3768 	struct amdgpu_display_manager *dm = &adev->dm;
3769 	struct drm_private_obj *obj;
3770 	struct drm_private_state *new_obj_state;
3771 	int i;
3772 
3773 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3774 		if (obj->funcs == dm->atomic_obj.funcs)
3775 			return to_dm_atomic_state(new_obj_state);
3776 	}
3777 
3778 	return NULL;
3779 }
3780 
3781 static struct drm_private_state *
3782 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3783 {
3784 	struct dm_atomic_state *old_state, *new_state;
3785 
3786 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3787 	if (!new_state)
3788 		return NULL;
3789 
3790 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3791 
3792 	old_state = to_dm_atomic_state(obj->state);
3793 
3794 	if (old_state && old_state->context)
3795 		new_state->context = dc_copy_state(old_state->context);
3796 
3797 	if (!new_state->context) {
3798 		kfree(new_state);
3799 		return NULL;
3800 	}
3801 
3802 	return &new_state->base;
3803 }
3804 
3805 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3806 				    struct drm_private_state *state)
3807 {
3808 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3809 
3810 	if (dm_state && dm_state->context)
3811 		dc_release_state(dm_state->context);
3812 
3813 	kfree(dm_state);
3814 }
3815 
3816 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3817 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3818 	.atomic_destroy_state = dm_atomic_destroy_state,
3819 };
3820 
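/*
 * Set up the DRM mode_config limits and callbacks, create the DM private
 * atomic object seeded with a copy of the current DC state, and register
 * the driver-specific properties and the audio component.
 */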
3821 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3822 {
3823 	struct dm_atomic_state *state;
3824 	int r;
3825 
3826 	adev->mode_info.mode_config_initialized = true;
3827 
3828 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3829 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3830 
3831 	adev_to_drm(adev)->mode_config.max_width = 16384;
3832 	adev_to_drm(adev)->mode_config.max_height = 16384;
3833 
3834 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3835 	/* disable prefer shadow for now due to hibernation issues */
3836 	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3837 	/* indicates support for immediate flip */
3838 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3839 
3840 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3841 
3842 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3843 	if (!state)
3844 		return -ENOMEM;
3845 
3846 	state->context = dc_create_state(adev->dm.dc);
3847 	if (!state->context) {
3848 		kfree(state);
3849 		return -ENOMEM;
3850 	}
3851 
3852 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3853 
3854 	drm_atomic_private_obj_init(adev_to_drm(adev),
3855 				    &adev->dm.atomic_obj,
3856 				    &state->base,
3857 				    &dm_atomic_state_funcs);
3858 
3859 	r = amdgpu_display_modeset_create_props(adev);
3860 	if (r) {
3861 		dc_release_state(state->context);
3862 		kfree(state);
3863 		return r;
3864 	}
3865 
3866 	r = amdgpu_dm_audio_init(adev);
3867 	if (r) {
3868 		dc_release_state(state->context);
3869 		kfree(state);
3870 		return r;
3871 	}
3872 
3873 	return 0;
3874 }
3875 
3876 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3877 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3878 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3879 
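/*
 * Query the platform (ACPI) backlight limits once per panel and cache
 * them, falling back to the driver defaults when ACPI is unavailable or
 * reports nothing. For panels using AUX backlight control the min/max
 * input signal is left untouched here.
 */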
3880 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3881 					    int bl_idx)
3882 {
3883 #if defined(CONFIG_ACPI)
3884 	struct amdgpu_dm_backlight_caps caps;
3885 
3886 	memset(&caps, 0, sizeof(caps));
3887 
3888 	if (dm->backlight_caps[bl_idx].caps_valid)
3889 		return;
3890 
3891 	amdgpu_acpi_get_backlight_caps(&caps);
3892 	if (caps.caps_valid) {
3893 		dm->backlight_caps[bl_idx].caps_valid = true;
3894 		if (caps.aux_support)
3895 			return;
3896 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3897 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3898 	} else {
3899 		dm->backlight_caps[bl_idx].min_input_signal =
3900 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3901 		dm->backlight_caps[bl_idx].max_input_signal =
3902 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3903 	}
3904 #else
3905 	if (dm->backlight_caps[bl_idx].aux_support)
3906 		return;
3907 
3908 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3909 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3910 #endif
3911 }
3912 
3913 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3914 				unsigned *min, unsigned *max)
3915 {
3916 	if (!caps)
3917 		return 0;
3918 
3919 	if (caps->aux_support) {
3920 		// Firmware limits are in nits, DC API wants millinits.
3921 		*max = 1000 * caps->aux_max_input_signal;
3922 		*min = 1000 * caps->aux_min_input_signal;
3923 	} else {
3924 		// Firmware limits are 8-bit, PWM control is 16-bit.
3925 		*max = 0x101 * caps->max_input_signal;
3926 		*min = 0x101 * caps->min_input_signal;
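		// e.g. the driver defaults of 12 and 255 map to
		// 0x101 * 12 = 0x0C0C and 0x101 * 255 = 0xFFFF.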
3927 	}
3928 	return 1;
3929 }
3930 
3931 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3932 					uint32_t brightness)
3933 {
3934 	unsigned min, max;
3935 
3936 	if (!get_brightness_range(caps, &min, &max))
3937 		return brightness;
3938 
3939 	// Rescale 0..255 to min..max
3940 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3941 				       AMDGPU_MAX_BL_LEVEL);
3942 }
3943 
3944 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3945 				      uint32_t brightness)
3946 {
3947 	unsigned min, max;
3948 
3949 	if (!get_brightness_range(caps, &min, &max))
3950 		return brightness;
3951 
3952 	if (brightness < min)
3953 		return 0;
3954 	// Rescale min..max to 0..255
3955 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3956 				 max - min);
3957 }
3958 
3959 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3960 					 int bl_idx,
3961 					 u32 user_brightness)
3962 {
3963 	struct amdgpu_dm_backlight_caps caps;
3964 	struct dc_link *link;
3965 	u32 brightness;
3966 	bool rc;
3967 
3968 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3969 	caps = dm->backlight_caps[bl_idx];
3970 
3971 	dm->brightness[bl_idx] = user_brightness;
3972 	/* update scratch register */
3973 	if (bl_idx == 0)
3974 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3975 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3976 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3977 
3978 	/* Change brightness based on AUX property */
3979 	if (caps.aux_support) {
3980 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3981 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3982 		if (!rc)
3983 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3984 	} else {
3985 		rc = dc_link_set_backlight_level(link, brightness, 0);
3986 		if (!rc)
3987 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3988 	}
3989 
3990 	if (rc)
3991 		dm->actual_brightness[bl_idx] = user_brightness;
3992 }
3993 
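/*
 * backlight_ops.update_status: map the backlight device back to its eDP
 * index and program the requested brightness; unknown devices fall back
 * to index 0.
 */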
3994 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3995 {
3996 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3997 	int i;
3998 
3999 	for (i = 0; i < dm->num_of_edps; i++) {
4000 		if (bd == dm->backlight_dev[i])
4001 			break;
4002 	}
4003 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4004 		i = 0;
4005 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4006 
4007 	return 0;
4008 }
4009 
4010 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4011 					 int bl_idx)
4012 {
4013 	struct amdgpu_dm_backlight_caps caps;
4014 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4015 
4016 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4017 	caps = dm->backlight_caps[bl_idx];
4018 
4019 	if (caps.aux_support) {
4020 		u32 avg, peak;
4021 		bool rc;
4022 
4023 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4024 		if (!rc)
4025 			return dm->brightness[bl_idx];
4026 		return convert_brightness_to_user(&caps, avg);
4027 	} else {
4028 		int ret = dc_link_get_backlight_level(link);
4029 
4030 		if (ret == DC_ERROR_UNEXPECTED)
4031 			return dm->brightness[bl_idx];
4032 		return convert_brightness_to_user(&caps, ret);
4033 	}
4034 }
4035 
4036 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4037 {
4038 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4039 	int i;
4040 
4041 	for (i = 0; i < dm->num_of_edps; i++) {
4042 		if (bd == dm->backlight_dev[i])
4043 			break;
4044 	}
4045 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4046 		i = 0;
4047 	return amdgpu_dm_backlight_get_level(dm, i);
4048 }
4049 
4050 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4051 	.options = BL_CORE_SUSPENDRESUME,
4052 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4053 	.update_status	= amdgpu_dm_backlight_update_status,
4054 };
4055 
4056 static void
4057 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4058 {
4059 	char bl_name[16];
4060 	struct backlight_properties props = { 0 };
4061 
4062 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4063 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4064 
4065 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4066 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4067 	props.type = BACKLIGHT_RAW;
4068 
4069 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4070 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4071 
4072 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4073 								       adev_to_drm(dm->adev)->dev,
4074 								       dm,
4075 								       &amdgpu_dm_backlight_ops,
4076 								       &props);
4077 
4078 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4079 		DRM_ERROR("DM: Backlight registration failed!\n");
4080 	else
4081 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4082 }
4083 
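/*
 * Allocate and initialize one DRM plane of the given type. Primary planes
 * are restricted to their own CRTC while other plane types may attach to
 * any CRTC; the DC plane caps are forwarded to amdgpu_dm_plane_init() and
 * the plane is recorded in mode_info->planes when a slot is supplied.
 */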
4084 static int initialize_plane(struct amdgpu_display_manager *dm,
4085 			    struct amdgpu_mode_info *mode_info, int plane_id,
4086 			    enum drm_plane_type plane_type,
4087 			    const struct dc_plane_cap *plane_cap)
4088 {
4089 	struct drm_plane *plane;
4090 	unsigned long possible_crtcs;
4091 	int ret = 0;
4092 
4093 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4094 	if (!plane) {
4095 		DRM_ERROR("KMS: Failed to allocate plane\n");
4096 		return -ENOMEM;
4097 	}
4098 	plane->type = plane_type;
4099 
4100 	/*
4101 	 * HACK: IGT tests expect that the primary plane for a CRTC
4102 	 * can only have one possible CRTC. Only expose support for
4103 	 * any CRTC on planes that are not going to be used as a primary
4104 	 * plane for a CRTC - i.e. overlay or underlay planes.
4105 	 */
4106 	possible_crtcs = 1 << plane_id;
4107 	if (plane_id >= dm->dc->caps.max_streams)
4108 		possible_crtcs = 0xff;
4109 
4110 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4111 
4112 	if (ret) {
4113 		DRM_ERROR("KMS: Failed to initialize plane\n");
4114 		kfree(plane);
4115 		return ret;
4116 	}
4117 
4118 	if (mode_info)
4119 		mode_info->planes[plane_id] = plane;
4120 
4121 	return ret;
4122 }
4123 
4124 
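/*
 * Register a backlight device for an eDP/LVDS link that has a sink
 * connected and remember the link so brightness requests can be routed
 * to it later. Links of other signal types are ignored.
 */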
4125 static void register_backlight_device(struct amdgpu_display_manager *dm,
4126 				      struct dc_link *link)
4127 {
4128 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4129 	    link->type != dc_connection_none) {
4130 		/*
4131 		 * Even if registration failed, we should continue with
4132 		 * DM initialization because not having a backlight control
4133 		 * is better than a black screen.
4134 		 */
4135 		if (!dm->backlight_dev[dm->num_of_edps])
4136 			amdgpu_dm_register_backlight_device(dm);
4137 
4138 		if (dm->backlight_dev[dm->num_of_edps]) {
4139 			dm->backlight_link[dm->num_of_edps] = link;
4140 			dm->num_of_edps++;
4141 		}
4142 	}
4143 }
4144 
4145 
4146 /*
4147  * In this architecture, the association
4148  * connector -> encoder -> crtc
4149  * is not really required. The crtc and connector will hold the
4150  * display_index as an abstraction to use with the DAL component.
4151  *
4152  * Returns 0 on success
4153  */
4154 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4155 {
4156 	struct amdgpu_display_manager *dm = &adev->dm;
4157 	int32_t i;
4158 	struct amdgpu_dm_connector *aconnector = NULL;
4159 	struct amdgpu_encoder *aencoder = NULL;
4160 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4161 	uint32_t link_cnt;
4162 	int32_t primary_planes;
4163 	enum dc_connection_type new_connection_type = dc_connection_none;
4164 	const struct dc_plane_cap *plane;
4165 	bool psr_feature_enabled = false;
4166 
4167 	dm->display_indexes_num = dm->dc->caps.max_streams;
4168 	/* Update the actual number of CRTCs used */
4169 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4170 
4171 	link_cnt = dm->dc->caps.max_links;
4172 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4173 		DRM_ERROR("DM: Failed to initialize mode config\n");
4174 		return -EINVAL;
4175 	}
4176 
4177 	/* There is one primary plane per CRTC */
4178 	primary_planes = dm->dc->caps.max_streams;
4179 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4180 
4181 	/*
4182 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4183 	 * Order is reversed to match iteration order in atomic check.
4184 	 */
4185 	for (i = (primary_planes - 1); i >= 0; i--) {
4186 		plane = &dm->dc->caps.planes[i];
4187 
4188 		if (initialize_plane(dm, mode_info, i,
4189 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4190 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4191 			goto fail;
4192 		}
4193 	}
4194 
4195 	/*
4196 	 * Initialize overlay planes, index starting after primary planes.
4197 	 * These planes have a higher DRM index than the primary planes since
4198 	 * they should be considered as having a higher z-order.
4199 	 * Order is reversed to match iteration order in atomic check.
4200 	 *
4201 	 * Only support DCN for now, and only expose one so we don't encourage
4202 	 * userspace to use up all the pipes.
4203 	 */
4204 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4205 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4206 
4207 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4208 			continue;
4209 
4210 		if (!plane->blends_with_above || !plane->blends_with_below)
4211 			continue;
4212 
4213 		if (!plane->pixel_format_support.argb8888)
4214 			continue;
4215 
4216 		if (initialize_plane(dm, NULL, primary_planes + i,
4217 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4218 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4219 			goto fail;
4220 		}
4221 
4222 		/* Only create one overlay plane. */
4223 		break;
4224 	}
4225 
4226 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4227 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4228 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4229 			goto fail;
4230 		}
4231 
4232 	/* Use Outbox interrupt */
4233 	switch (adev->ip_versions[DCE_HWIP][0]) {
4234 	case IP_VERSION(3, 0, 0):
4235 	case IP_VERSION(3, 1, 2):
4236 	case IP_VERSION(3, 1, 3):
4237 	case IP_VERSION(3, 1, 5):
4238 	case IP_VERSION(3, 1, 6):
4239 	case IP_VERSION(3, 2, 0):
4240 	case IP_VERSION(3, 2, 1):
4241 	case IP_VERSION(2, 1, 0):
4242 		if (register_outbox_irq_handlers(dm->adev)) {
4243 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4244 			goto fail;
4245 		}
4246 		break;
4247 	default:
4248 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4249 			      adev->ip_versions[DCE_HWIP][0]);
4250 	}
4251 
4252 	/* Determine whether to enable PSR support by default. */
4253 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4254 		switch (adev->ip_versions[DCE_HWIP][0]) {
4255 		case IP_VERSION(3, 1, 2):
4256 		case IP_VERSION(3, 1, 3):
4257 		case IP_VERSION(3, 1, 5):
4258 		case IP_VERSION(3, 1, 6):
4259 		case IP_VERSION(3, 2, 0):
4260 		case IP_VERSION(3, 2, 1):
4261 			psr_feature_enabled = true;
4262 			break;
4263 		default:
4264 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4265 			break;
4266 		}
4267 	}
4268 
4269 	/* Loop over all connectors on the board */
4270 	for (i = 0; i < link_cnt; i++) {
4271 		struct dc_link *link = NULL;
4272 
4273 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4274 			DRM_ERROR(
4275 				"KMS: Cannot support more than %d display indexes\n",
4276 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4277 			continue;
4278 		}
4279 
4280 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4281 		if (!aconnector)
4282 			goto fail;
4283 
4284 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4285 		if (!aencoder)
4286 			goto fail;
4287 
4288 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4289 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4290 			goto fail;
4291 		}
4292 
4293 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4294 			DRM_ERROR("KMS: Failed to initialize connector\n");
4295 			goto fail;
4296 		}
4297 
4298 		link = dc_get_link_at_index(dm->dc, i);
4299 
4300 		if (!dc_link_detect_sink(link, &new_connection_type))
4301 			DRM_ERROR("KMS: Failed to detect connector\n");
4302 
4303 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4304 			emulated_link_detect(link);
4305 			amdgpu_dm_update_connector_after_detect(aconnector);
4306 
4307 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4308 			amdgpu_dm_update_connector_after_detect(aconnector);
4309 			register_backlight_device(dm, link);
4310 			if (dm->num_of_edps)
4311 				update_connector_ext_caps(aconnector);
4312 			if (psr_feature_enabled)
4313 				amdgpu_dm_set_psr_caps(link);
4314 
4315 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4316 			 * PSR is also supported.
4317 			 */
4318 			if (link->psr_settings.psr_feature_enabled)
4319 				adev_to_drm(adev)->vblank_disable_immediate = false;
4320 		}
4323 	}
4324 
4325 	/* Software is initialized. Now we can register interrupt handlers. */
4326 	switch (adev->asic_type) {
4327 #if defined(CONFIG_DRM_AMD_DC_SI)
4328 	case CHIP_TAHITI:
4329 	case CHIP_PITCAIRN:
4330 	case CHIP_VERDE:
4331 	case CHIP_OLAND:
4332 		if (dce60_register_irq_handlers(dm->adev)) {
4333 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4334 			goto fail;
4335 		}
4336 		break;
4337 #endif
4338 	case CHIP_BONAIRE:
4339 	case CHIP_HAWAII:
4340 	case CHIP_KAVERI:
4341 	case CHIP_KABINI:
4342 	case CHIP_MULLINS:
4343 	case CHIP_TONGA:
4344 	case CHIP_FIJI:
4345 	case CHIP_CARRIZO:
4346 	case CHIP_STONEY:
4347 	case CHIP_POLARIS11:
4348 	case CHIP_POLARIS10:
4349 	case CHIP_POLARIS12:
4350 	case CHIP_VEGAM:
4351 	case CHIP_VEGA10:
4352 	case CHIP_VEGA12:
4353 	case CHIP_VEGA20:
4354 		if (dce110_register_irq_handlers(dm->adev)) {
4355 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4356 			goto fail;
4357 		}
4358 		break;
4359 	default:
4360 		switch (adev->ip_versions[DCE_HWIP][0]) {
4361 		case IP_VERSION(1, 0, 0):
4362 		case IP_VERSION(1, 0, 1):
4363 		case IP_VERSION(2, 0, 2):
4364 		case IP_VERSION(2, 0, 3):
4365 		case IP_VERSION(2, 0, 0):
4366 		case IP_VERSION(2, 1, 0):
4367 		case IP_VERSION(3, 0, 0):
4368 		case IP_VERSION(3, 0, 2):
4369 		case IP_VERSION(3, 0, 3):
4370 		case IP_VERSION(3, 0, 1):
4371 		case IP_VERSION(3, 1, 2):
4372 		case IP_VERSION(3, 1, 3):
4373 		case IP_VERSION(3, 1, 5):
4374 		case IP_VERSION(3, 1, 6):
4375 		case IP_VERSION(3, 2, 0):
4376 		case IP_VERSION(3, 2, 1):
4377 			if (dcn10_register_irq_handlers(dm->adev)) {
4378 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4379 				goto fail;
4380 			}
4381 			break;
4382 		default:
4383 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4384 					adev->ip_versions[DCE_HWIP][0]);
4385 			goto fail;
4386 		}
4387 		break;
4388 	}
4389 
4390 	return 0;
4391 fail:
4392 	kfree(aencoder);
4393 	kfree(aconnector);
4394 
4395 	return -EINVAL;
4396 }
4397 
4398 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4399 {
4400 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4402 }
4403 
4404 /******************************************************************************
4405  * amdgpu_display_funcs functions
4406  *****************************************************************************/
4407 
4408 /*
4409  * dm_bandwidth_update - program display watermarks
4410  *
4411  * @adev: amdgpu_device pointer
4412  *
4413  * Calculate and program the display watermarks and line buffer allocation.
4414  */
4415 static void dm_bandwidth_update(struct amdgpu_device *adev)
4416 {
4417 	/* TODO: implement later */
4418 }
4419 
4420 static const struct amdgpu_display_funcs dm_display_funcs = {
4421 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4422 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4423 	.backlight_set_level = NULL, /* never called for DC */
4424 	.backlight_get_level = NULL, /* never called for DC */
4425 	.hpd_sense = NULL, /* called unconditionally */
4426 	.hpd_set_polarity = NULL, /* called unconditionally */
4427 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4428 	.page_flip_get_scanoutpos =
4429 		dm_crtc_get_scanoutpos, /* called unconditionally */
4430 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4431 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4432 };
4433 
4434 #if defined(CONFIG_DEBUG_KERNEL_DC)
4435 
4436 static ssize_t s3_debug_store(struct device *device,
4437 			      struct device_attribute *attr,
4438 			      const char *buf,
4439 			      size_t count)
4440 {
4441 	int ret;
4442 	int s3_state;
4443 	struct drm_device *drm_dev = dev_get_drvdata(device);
4444 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4445 
4446 	ret = kstrtoint(buf, 0, &s3_state);
4447 
4448 	if (ret == 0) {
4449 		if (s3_state) {
4450 			dm_resume(adev);
4451 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4452 		} else {
4453 			dm_suspend(adev);
		}
4454 	}
4455 
4456 	return ret == 0 ? count : 0;
4457 }
4458 
4459 DEVICE_ATTR_WO(s3_debug);
4460 
4461 #endif
4462 
4463 static int dm_early_init(void *handle)
4464 {
4465 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4466 
4467 	switch (adev->asic_type) {
4468 #if defined(CONFIG_DRM_AMD_DC_SI)
4469 	case CHIP_TAHITI:
4470 	case CHIP_PITCAIRN:
4471 	case CHIP_VERDE:
4472 		adev->mode_info.num_crtc = 6;
4473 		adev->mode_info.num_hpd = 6;
4474 		adev->mode_info.num_dig = 6;
4475 		break;
4476 	case CHIP_OLAND:
4477 		adev->mode_info.num_crtc = 2;
4478 		adev->mode_info.num_hpd = 2;
4479 		adev->mode_info.num_dig = 2;
4480 		break;
4481 #endif
4482 	case CHIP_BONAIRE:
4483 	case CHIP_HAWAII:
4484 		adev->mode_info.num_crtc = 6;
4485 		adev->mode_info.num_hpd = 6;
4486 		adev->mode_info.num_dig = 6;
4487 		break;
4488 	case CHIP_KAVERI:
4489 		adev->mode_info.num_crtc = 4;
4490 		adev->mode_info.num_hpd = 6;
4491 		adev->mode_info.num_dig = 7;
4492 		break;
4493 	case CHIP_KABINI:
4494 	case CHIP_MULLINS:
4495 		adev->mode_info.num_crtc = 2;
4496 		adev->mode_info.num_hpd = 6;
4497 		adev->mode_info.num_dig = 6;
4498 		break;
4499 	case CHIP_FIJI:
4500 	case CHIP_TONGA:
4501 		adev->mode_info.num_crtc = 6;
4502 		adev->mode_info.num_hpd = 6;
4503 		adev->mode_info.num_dig = 7;
4504 		break;
4505 	case CHIP_CARRIZO:
4506 		adev->mode_info.num_crtc = 3;
4507 		adev->mode_info.num_hpd = 6;
4508 		adev->mode_info.num_dig = 9;
4509 		break;
4510 	case CHIP_STONEY:
4511 		adev->mode_info.num_crtc = 2;
4512 		adev->mode_info.num_hpd = 6;
4513 		adev->mode_info.num_dig = 9;
4514 		break;
4515 	case CHIP_POLARIS11:
4516 	case CHIP_POLARIS12:
4517 		adev->mode_info.num_crtc = 5;
4518 		adev->mode_info.num_hpd = 5;
4519 		adev->mode_info.num_dig = 5;
4520 		break;
4521 	case CHIP_POLARIS10:
4522 	case CHIP_VEGAM:
4523 		adev->mode_info.num_crtc = 6;
4524 		adev->mode_info.num_hpd = 6;
4525 		adev->mode_info.num_dig = 6;
4526 		break;
4527 	case CHIP_VEGA10:
4528 	case CHIP_VEGA12:
4529 	case CHIP_VEGA20:
4530 		adev->mode_info.num_crtc = 6;
4531 		adev->mode_info.num_hpd = 6;
4532 		adev->mode_info.num_dig = 6;
4533 		break;
4534 	default:
4535 
4536 		switch (adev->ip_versions[DCE_HWIP][0]) {
4537 		case IP_VERSION(2, 0, 2):
4538 		case IP_VERSION(3, 0, 0):
4539 			adev->mode_info.num_crtc = 6;
4540 			adev->mode_info.num_hpd = 6;
4541 			adev->mode_info.num_dig = 6;
4542 			break;
4543 		case IP_VERSION(2, 0, 0):
4544 		case IP_VERSION(3, 0, 2):
4545 			adev->mode_info.num_crtc = 5;
4546 			adev->mode_info.num_hpd = 5;
4547 			adev->mode_info.num_dig = 5;
4548 			break;
4549 		case IP_VERSION(2, 0, 3):
4550 		case IP_VERSION(3, 0, 3):
4551 			adev->mode_info.num_crtc = 2;
4552 			adev->mode_info.num_hpd = 2;
4553 			adev->mode_info.num_dig = 2;
4554 			break;
4555 		case IP_VERSION(1, 0, 0):
4556 		case IP_VERSION(1, 0, 1):
4557 		case IP_VERSION(3, 0, 1):
4558 		case IP_VERSION(2, 1, 0):
4559 		case IP_VERSION(3, 1, 2):
4560 		case IP_VERSION(3, 1, 3):
4561 		case IP_VERSION(3, 1, 5):
4562 		case IP_VERSION(3, 1, 6):
4563 		case IP_VERSION(3, 2, 0):
4564 		case IP_VERSION(3, 2, 1):
4565 			adev->mode_info.num_crtc = 4;
4566 			adev->mode_info.num_hpd = 4;
4567 			adev->mode_info.num_dig = 4;
4568 			break;
4569 		default:
4570 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4571 					adev->ip_versions[DCE_HWIP][0]);
4572 			return -EINVAL;
4573 		}
4574 		break;
4575 	}
4576 
4577 	amdgpu_dm_set_irq_funcs(adev);
4578 
4579 	if (adev->mode_info.funcs == NULL)
4580 		adev->mode_info.funcs = &dm_display_funcs;
4581 
4582 	/*
4583 	 * Note: Do NOT change adev->audio_endpt_rreg and
4584 	 * adev->audio_endpt_wreg because they are initialised in
4585 	 * amdgpu_device_init()
4586 	 */
4587 #if defined(CONFIG_DEBUG_KERNEL_DC)
4588 	device_create_file(
4589 		adev_to_drm(adev)->dev,
4590 		&dev_attr_s3_debug);
4591 #endif
4592 
4593 	return 0;
4594 }
4595 
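/*
 * Note: the new_stream/old_stream arguments are currently unused; a
 * modeset is reported whenever DRM flags the CRTC state for one and the
 * new state is active.
 */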
4596 static bool modeset_required(struct drm_crtc_state *crtc_state,
4597 			     struct dc_stream_state *new_stream,
4598 			     struct dc_stream_state *old_stream)
4599 {
4600 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4601 }
4602 
4603 static bool modereset_required(struct drm_crtc_state *crtc_state)
4604 {
4605 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4606 }
4607 
4608 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4609 {
4610 	drm_encoder_cleanup(encoder);
4611 	kfree(encoder);
4612 }
4613 
4614 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4615 	.destroy = amdgpu_dm_encoder_destroy,
4616 };
4617 
4618 
4619 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4620 					 struct drm_framebuffer *fb,
4621 					 int *min_downscale, int *max_upscale)
4622 {
4623 	struct amdgpu_device *adev = drm_to_adev(dev);
4624 	struct dc *dc = adev->dm.dc;
4625 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4626 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4627 
4628 	switch (fb->format->format) {
4629 	case DRM_FORMAT_P010:
4630 	case DRM_FORMAT_NV12:
4631 	case DRM_FORMAT_NV21:
4632 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4633 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4634 		break;
4635 
4636 	case DRM_FORMAT_XRGB16161616F:
4637 	case DRM_FORMAT_ARGB16161616F:
4638 	case DRM_FORMAT_XBGR16161616F:
4639 	case DRM_FORMAT_ABGR16161616F:
4640 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4641 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4642 		break;
4643 
4644 	default:
4645 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4646 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4647 		break;
4648 	}
4649 
4650 	/*
4651 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4652 	 * scaling factor of 1.0 == 1000 units.
4653 	 */
4654 	if (*max_upscale == 1)
4655 		*max_upscale = 1000;
4656 
4657 	if (*min_downscale == 1)
4658 		*min_downscale = 1000;
4659 }
4660 
4661 
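/*
 * Translate a DRM plane state into DC scaling info: the 16.16 fixed-point
 * source rectangle is truncated to whole pixels, the CRTC rectangle
 * becomes the destination/clip rectangle, and the resulting scale factors
 * (in 1/1000 units) are validated against the per-format plane caps.
 */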
4662 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4663 				const struct drm_plane_state *state,
4664 				struct dc_scaling_info *scaling_info)
4665 {
4666 	int scale_w, scale_h, min_downscale, max_upscale;
4667 
4668 	memset(scaling_info, 0, sizeof(*scaling_info));
4669 
4670 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4671 	scaling_info->src_rect.x = state->src_x >> 16;
4672 	scaling_info->src_rect.y = state->src_y >> 16;
4673 
4674 	/*
4675 	 * For reasons we don't (yet) fully understand a non-zero
4676 	 * src_y coordinate into an NV12 buffer can cause a
4677 	 * system hang on DCN1x.
4678 	 * To avoid hangs (and maybe be overly cautious)
4679 	 * let's reject both non-zero src_x and src_y.
4680 	 *
4681 	 * We currently know of only one use-case to reproduce a
4682 	 * scenario with non-zero src_x and src_y for NV12, which
4683 	 * is to gesture the YouTube Android app into full screen
4684 	 * on ChromeOS.
4685 	 */
4686 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4687 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4688 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4689 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4690 		return -EINVAL;
4691 
4692 	scaling_info->src_rect.width = state->src_w >> 16;
4693 	if (scaling_info->src_rect.width == 0)
4694 		return -EINVAL;
4695 
4696 	scaling_info->src_rect.height = state->src_h >> 16;
4697 	if (scaling_info->src_rect.height == 0)
4698 		return -EINVAL;
4699 
4700 	scaling_info->dst_rect.x = state->crtc_x;
4701 	scaling_info->dst_rect.y = state->crtc_y;
4702 
4703 	if (state->crtc_w == 0)
4704 		return -EINVAL;
4705 
4706 	scaling_info->dst_rect.width = state->crtc_w;
4707 
4708 	if (state->crtc_h == 0)
4709 		return -EINVAL;
4710 
4711 	scaling_info->dst_rect.height = state->crtc_h;
4712 
4713 	/* DRM doesn't specify clipping on destination output. */
4714 	scaling_info->clip_rect = scaling_info->dst_rect;
4715 
4716 	/* Validate scaling per-format with DC plane caps */
4717 	if (state->plane && state->plane->dev && state->fb) {
4718 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4719 					     &min_downscale, &max_upscale);
4720 	} else {
4721 		min_downscale = 250;
4722 		max_upscale = 16000;
4723 	}
4724 
4725 	scale_w = scaling_info->dst_rect.width * 1000 /
4726 		  scaling_info->src_rect.width;
4727 
4728 	if (scale_w < min_downscale || scale_w > max_upscale)
4729 		return -EINVAL;
4730 
4731 	scale_h = scaling_info->dst_rect.height * 1000 /
4732 		  scaling_info->src_rect.height;
4733 
4734 	if (scale_h < min_downscale || scale_h > max_upscale)
4735 		return -EINVAL;
4736 
4737 	/*
4738 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4739 	 * assume reasonable defaults based on the format.
4740 	 */
4741 
4742 	return 0;
4743 }
4744 
4745 static void
4746 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4747 				 uint64_t tiling_flags)
4748 {
4749 	/* Fill GFX8 params */
4750 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4751 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4752 
4753 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4754 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4755 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4756 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4757 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4758 
4759 		/* XXX fix me for VI */
4760 		tiling_info->gfx8.num_banks = num_banks;
4761 		tiling_info->gfx8.array_mode =
4762 				DC_ARRAY_2D_TILED_THIN1;
4763 		tiling_info->gfx8.tile_split = tile_split;
4764 		tiling_info->gfx8.bank_width = bankw;
4765 		tiling_info->gfx8.bank_height = bankh;
4766 		tiling_info->gfx8.tile_aspect = mtaspect;
4767 		tiling_info->gfx8.tile_mode =
4768 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4769 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4770 			== DC_ARRAY_1D_TILED_THIN1) {
4771 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4772 	}
4773 
4774 	tiling_info->gfx8.pipe_config =
4775 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4776 }
4777 
4778 static void
4779 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4780 				  union dc_tiling_info *tiling_info)
4781 {
4782 	tiling_info->gfx9.num_pipes =
4783 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4784 	tiling_info->gfx9.num_banks =
4785 		adev->gfx.config.gb_addr_config_fields.num_banks;
4786 	tiling_info->gfx9.pipe_interleave =
4787 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4788 	tiling_info->gfx9.num_shader_engines =
4789 		adev->gfx.config.gb_addr_config_fields.num_se;
4790 	tiling_info->gfx9.max_compressed_frags =
4791 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4792 	tiling_info->gfx9.num_rb_per_se =
4793 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4794 	tiling_info->gfx9.shaderEnable = 1;
4795 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4796 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4797 }
4798 
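/*
 * Ask DC whether the requested DCC configuration is achievable for this
 * format/size/swizzle. Returns 0 when DCC is disabled or supported, and
 * -EINVAL when DC cannot compress the surface as described.
 */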
4799 static int
4800 validate_dcc(struct amdgpu_device *adev,
4801 	     const enum surface_pixel_format format,
4802 	     const enum dc_rotation_angle rotation,
4803 	     const union dc_tiling_info *tiling_info,
4804 	     const struct dc_plane_dcc_param *dcc,
4805 	     const struct dc_plane_address *address,
4806 	     const struct plane_size *plane_size)
4807 {
4808 	struct dc *dc = adev->dm.dc;
4809 	struct dc_dcc_surface_param input;
4810 	struct dc_surface_dcc_cap output;
4811 
4812 	memset(&input, 0, sizeof(input));
4813 	memset(&output, 0, sizeof(output));
4814 
4815 	if (!dcc->enable)
4816 		return 0;
4817 
4818 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4819 	    !dc->cap_funcs.get_dcc_compression_cap)
4820 		return -EINVAL;
4821 
4822 	input.format = format;
4823 	input.surface_size.width = plane_size->surface_size.width;
4824 	input.surface_size.height = plane_size->surface_size.height;
4825 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4826 
4827 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4828 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4829 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4830 		input.scan = SCAN_DIRECTION_VERTICAL;
4831 
4832 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4833 		return -EINVAL;
4834 
4835 	if (!output.capable)
4836 		return -EINVAL;
4837 
4838 	if (dcc->independent_64b_blks == 0 &&
4839 	    output.grph.rgb.independent_64b_blks != 0)
4840 		return -EINVAL;
4841 
4842 	return 0;
4843 }
4844 
4845 static bool
4846 modifier_has_dcc(uint64_t modifier)
4847 {
4848 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4849 }
4850 
4851 static unsigned
4852 modifier_gfx9_swizzle_mode(uint64_t modifier)
4853 {
4854 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4855 		return 0;
4856 
4857 	return AMD_FMT_MOD_GET(TILE, modifier);
4858 }
4859 
4860 static const struct drm_format_info *
4861 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4862 {
4863 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4864 }
4865 
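/*
 * Derive GFX9+ tiling parameters from an AMD format modifier: the pipe
 * and bank XOR bits (and the packer count on AMDGPU_FAMILY_NV and later)
 * encoded in the modifier override the defaults filled in by
 * fill_gfx9_tiling_info_from_device().
 */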
4866 static void
4867 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4868 				    union dc_tiling_info *tiling_info,
4869 				    uint64_t modifier)
4870 {
4871 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4872 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4873 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4874 	unsigned int pipes_log2;
4875 
4876 	pipes_log2 = min(5u, mod_pipe_xor_bits);
4877 
4878 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4879 
4880 	if (!IS_AMD_FMT_MOD(modifier))
4881 		return;
4882 
4883 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4884 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4885 
4886 	if (adev->family >= AMDGPU_FAMILY_NV) {
4887 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4888 	} else {
4889 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4890 
4891 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4892 	}
4893 }
4894 
4895 enum dm_micro_swizzle {
4896 	MICRO_SWIZZLE_Z = 0,
4897 	MICRO_SWIZZLE_S = 1,
4898 	MICRO_SWIZZLE_D = 2,
4899 	MICRO_SWIZZLE_R = 3
4900 };
4901 
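/*
 * .format_mod_supported hook: LINEAR and INVALID are always accepted;
 * anything else must be on the plane's advertised modifier list and is
 * further filtered by the bpp restrictions of the D swizzle and of DCC.
 */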
4902 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4903 					  uint32_t format,
4904 					  uint64_t modifier)
4905 {
4906 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4907 	const struct drm_format_info *info = drm_format_info(format);
4908 	int i;
4909 
4910 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4911 
4912 	if (!info)
4913 		return false;
4914 
4915 	/*
4916 	 * We always have to allow these modifiers:
4917 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4918 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4919 	 */
4920 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4921 	    modifier == DRM_FORMAT_MOD_INVALID) {
4922 		return true;
4923 	}
4924 
4925 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4926 	for (i = 0; i < plane->modifier_count; i++) {
4927 		if (modifier == plane->modifiers[i])
4928 			break;
4929 	}
4930 	if (i == plane->modifier_count)
4931 		return false;
4932 
4933 	/*
4934 	 * For D swizzle the canonical modifier depends on the bpp, so check
4935 	 * it here.
4936 	 */
4937 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4938 	    adev->family >= AMDGPU_FAMILY_NV) {
4939 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4940 			return false;
4941 	}
4942 
4943 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4944 	    info->cpp[0] < 8)
4945 		return false;
4946 
4947 	if (modifier_has_dcc(modifier)) {
4948 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4949 		if (info->cpp[0] != 4)
4950 			return false;
4951 		/* We support multi-planar formats, but not when combined with
4952 		 * additional DCC metadata planes.
		 */
4953 		if (info->num_planes > 1)
4954 			return false;
4955 	}
4956 
4957 	return true;
4958 }
4959 
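/*
 * Append a modifier to a dynamically grown array, doubling the capacity
 * when it runs out. On allocation failure the list is freed and *mods is
 * set to NULL so the caller can detect the error.
 */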
4960 static void
4961 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4962 {
4963 	if (!*mods)
4964 		return;
4965 
4966 	if (*cap - *size < 1) {
4967 		uint64_t new_cap = *cap * 2;
4968 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4969 
4970 		if (!new_mods) {
4971 			kfree(*mods);
4972 			*mods = NULL;
4973 			return;
4974 		}
4975 
4976 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4977 		kfree(*mods);
4978 		*mods = new_mods;
4979 		*cap = new_cap;
4980 	}
4981 
4982 	(*mods)[*size] = mod;
4983 	*size += 1;
4984 }
4985 
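/*
 * Build the modifier list for GFX9 (Vega/Raven). The pipe/bank XOR bits
 * are derived from gb_addr_config so the advertised modifiers match the
 * GFX block's tiling; DCC-capable variants are only added for the Raven
 * family.
 */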
4986 static void
4987 add_gfx9_modifiers(const struct amdgpu_device *adev,
4988 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4989 {
4990 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4991 	int pipe_xor_bits = min(8, pipes +
4992 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4993 	int bank_xor_bits = min(8 - pipe_xor_bits,
4994 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4995 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4996 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4997 
4999 	if (adev->family == AMDGPU_FAMILY_RV) {
5000 		/* Raven2 and later */
5001 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5002 
5003 		/*
5004 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5005 		 * doesn't support _D on DCN
5006 		 */
5007 
5008 		if (has_constant_encode) {
5009 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5010 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5011 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5012 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5013 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5014 				    AMD_FMT_MOD_SET(DCC, 1) |
5015 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5016 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5017 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5018 		}
5019 
5020 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5021 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5022 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5023 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5024 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5025 			    AMD_FMT_MOD_SET(DCC, 1) |
5026 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5027 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5028 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5029 
5030 		if (has_constant_encode) {
5031 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5032 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5033 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5034 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5035 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5036 				    AMD_FMT_MOD_SET(DCC, 1) |
5037 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5038 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5039 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5041 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5042 				    AMD_FMT_MOD_SET(RB, rb) |
5043 				    AMD_FMT_MOD_SET(PIPE, pipes));
5044 		}
5045 
5046 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5047 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5048 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5049 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5050 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5051 			    AMD_FMT_MOD_SET(DCC, 1) |
5052 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5053 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5054 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5055 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5056 			    AMD_FMT_MOD_SET(RB, rb) |
5057 			    AMD_FMT_MOD_SET(PIPE, pipes));
5058 	}
5059 
5060 	/*
5061 	 * Only supported for 64bpp on Raven, will be filtered on format in
5062 	 * dm_plane_format_mod_supported.
5063 	 */
5064 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5065 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5066 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5067 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5068 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5069 
5070 	if (adev->family == AMDGPU_FAMILY_RV) {
5071 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5072 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5073 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5074 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5075 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5076 	}
5077 
5078 	/*
5079 	 * Only supported for 64bpp on Raven, will be filtered on format in
5080 	 * dm_plane_format_mod_supported.
5081 	 */
5082 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5083 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5084 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5085 
5086 	if (adev->family == AMDGPU_FAMILY_RV) {
5087 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5088 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5089 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5090 	}
5091 }
5092 
5093 static void
5094 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5095 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5096 {
5097 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5098 
5099 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5100 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5101 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5102 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5103 		    AMD_FMT_MOD_SET(DCC, 1) |
5104 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5105 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5106 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5107 
5108 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5109 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5110 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5111 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5112 		    AMD_FMT_MOD_SET(DCC, 1) |
5113 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5114 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5115 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5116 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5117 
5118 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5119 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5120 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5121 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5122 
5123 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5124 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5125 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5126 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5127 
5128 
5129 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5130 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5131 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5132 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5133 
5134 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5135 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5136 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5137 }
5138 
5139 static void
5140 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5141 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5142 {
5143 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5144 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5145 
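	/*
	 * GFX10.3 (RB+) swizzles carry a PACKERS field and can use independent
	 * 128B DCC blocks. DCC-capable R_X modifiers are listed first, then the
	 * plain R_X/S_X ones, and finally the GFX9-compatible 64K_D/64K_S
	 * fallbacks.
	 */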
5146 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5147 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5148 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5149 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5150 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5151 		    AMD_FMT_MOD_SET(DCC, 1) |
5152 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5153 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5154 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5155 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5156 
5157 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5158 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5159 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5160 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5161 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5162 		    AMD_FMT_MOD_SET(DCC, 1) |
5163 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5164 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5165 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5166 
5167 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5168 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5169 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5170 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5171 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5172 		    AMD_FMT_MOD_SET(DCC, 1) |
5173 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5174 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5175 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5176 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5177 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5178 
5179 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5180 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5181 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5182 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5183 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5184 		    AMD_FMT_MOD_SET(DCC, 1) |
5185 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5186 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5187 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5188 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5189 
5190 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5191 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5192 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5193 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5194 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5195 
5196 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5197 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5198 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5199 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5200 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5201 
5202 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5203 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5204 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5205 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5206 
5207 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5208 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5209 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5210 }
5211 
5212 static void
5213 add_gfx11_modifiers(struct amdgpu_device *adev,
5214 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5215 {
5216 	int num_pipes = 0;
5217 	int pipe_xor_bits = 0;
5218 	int num_pkrs = 0;
5219 	int pkrs = 0;
5220 	u32 gb_addr_config;
5221 	u8 i = 0;
5222 	unsigned swizzle_r_x;
5223 	uint64_t modifier_r_x;
5224 	uint64_t modifier_dcc_best;
5225 	uint64_t modifier_dcc_4k;
5226 
5227 	/* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
5228 	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}. */
5229 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5230 	ASSERT(gb_addr_config != 0);
5231 
5232 	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5233 	pkrs = ilog2(num_pkrs);
5234 	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5235 	pipe_xor_bits = ilog2(num_pipes);
5236 
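	/*
	 * Two passes: one for the 256K R_X swizzle and one for the 64K R_X
	 * swizzle, ordered so that the variant preferred for this pipe count
	 * comes first. Each pass adds the DCC, DCC+retile and non-DCC forms.
	 */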
5237 	for (i = 0; i < 2; i++) {
5238 		/* Insert the best one first. */
5239 		/* R_X swizzle modes are the best for rendering and DCC requires them. */
5240 		if (num_pipes > 16)
5241 			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5242 		else
5243 			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
5244 
5245 		modifier_r_x = AMD_FMT_MOD |
5246 			       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5247 			       AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5248 			       AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5249 			       AMD_FMT_MOD_SET(PACKERS, pkrs);
5250 
5251 		/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5252 		modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5253 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5254 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5255 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5256 
5257 		/* DCC settings for 4K and greater resolutions. (required by display hw) */
5258 		modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5259 				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5260 				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5261 				  AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5262 
5263 		add_modifier(mods, size, capacity, modifier_dcc_best);
5264 		add_modifier(mods, size, capacity, modifier_dcc_4k);
5265 
5266 		add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5267 		add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5268 
5269 		add_modifier(mods, size, capacity, modifier_r_x);
5270 	}
5271 
5272 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5273 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5274 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5275 }
5276 
5277 static int
5278 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5279 {
5280 	uint64_t size = 0, capacity = 128;
5281 	*mods = NULL;
5282 
5283 	/* We have not hooked up any pre-GFX9 modifiers. */
5284 	if (adev->family < AMDGPU_FAMILY_AI)
5285 		return 0;
5286 
5287 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5288 
5289 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5290 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5291 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5292 		return *mods ? 0 : -ENOMEM;
5293 	}
5294 
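	/* Pick the modifier list matching the GFX generation of this ASIC. */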
5295 	switch (adev->family) {
5296 	case AMDGPU_FAMILY_AI:
5297 	case AMDGPU_FAMILY_RV:
5298 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5299 		break;
5300 	case AMDGPU_FAMILY_NV:
5301 	case AMDGPU_FAMILY_VGH:
5302 	case AMDGPU_FAMILY_YC:
5303 	case AMDGPU_FAMILY_GC_10_3_6:
5304 	case AMDGPU_FAMILY_GC_10_3_7:
5305 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5306 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5307 		else
5308 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5309 		break;
5310 	case AMDGPU_FAMILY_GC_11_0_0:
5311 		add_gfx11_modifiers(adev, mods, &size, &capacity);
5312 		break;
5313 	}
5314 
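	/* Linear is always advertised, after all tiled modifiers. */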
5315 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5316 
5317 	/* INVALID marks the end of the list. */
5318 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5319 
5320 	if (!*mods)
5321 		return -ENOMEM;
5322 
5323 	return 0;
5324 }
5325 
5326 static int
5327 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5328 					  const struct amdgpu_framebuffer *afb,
5329 					  const enum surface_pixel_format format,
5330 					  const enum dc_rotation_angle rotation,
5331 					  const struct plane_size *plane_size,
5332 					  union dc_tiling_info *tiling_info,
5333 					  struct dc_plane_dcc_param *dcc,
5334 					  struct dc_plane_address *address,
5335 					  const bool force_disable_dcc)
5336 {
5337 	const uint64_t modifier = afb->base.modifier;
5338 	int ret = 0;
5339 
5340 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5341 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5342 
5343 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5344 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5345 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5346 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5347 
5348 		dcc->enable = 1;
5349 		dcc->meta_pitch = afb->base.pitches[1];
5350 		dcc->independent_64b_blks = independent_64b_blks;
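		/*
		 * Map the modifier's independent 64B/128B block flags onto the
		 * HUBP independent-block setting; the encoding differs between
		 * RB+ (gfx10.3+) and older DCN generations.
		 */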
5351 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5352 			if (independent_64b_blks && independent_128b_blks)
5353 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5354 			else if (independent_128b_blks)
5355 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5356 			else if (independent_64b_blks && !independent_128b_blks)
5357 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5358 			else
5359 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5360 		} else {
5361 			if (independent_64b_blks)
5362 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5363 			else
5364 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5365 		}
5366 
5367 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5368 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5369 	}
5370 
5371 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5372 	if (ret)
5373 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5374 
5375 	return ret;
5376 }
5377 
5378 static int
5379 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5380 			     const struct amdgpu_framebuffer *afb,
5381 			     const enum surface_pixel_format format,
5382 			     const enum dc_rotation_angle rotation,
5383 			     const uint64_t tiling_flags,
5384 			     union dc_tiling_info *tiling_info,
5385 			     struct plane_size *plane_size,
5386 			     struct dc_plane_dcc_param *dcc,
5387 			     struct dc_plane_address *address,
5388 			     bool tmz_surface,
5389 			     bool force_disable_dcc)
5390 {
5391 	const struct drm_framebuffer *fb = &afb->base;
5392 	int ret;
5393 
5394 	memset(tiling_info, 0, sizeof(*tiling_info));
5395 	memset(plane_size, 0, sizeof(*plane_size));
5396 	memset(dcc, 0, sizeof(*dcc));
5397 	memset(address, 0, sizeof(*address));
5398 
5399 	address->tmz_surface = tmz_surface;
5400 
5401 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5402 		uint64_t addr = afb->address + fb->offsets[0];
5403 
5404 		plane_size->surface_size.x = 0;
5405 		plane_size->surface_size.y = 0;
5406 		plane_size->surface_size.width = fb->width;
5407 		plane_size->surface_size.height = fb->height;
5408 		plane_size->surface_pitch =
5409 			fb->pitches[0] / fb->format->cpp[0];
5410 
5411 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5412 		address->grph.addr.low_part = lower_32_bits(addr);
5413 		address->grph.addr.high_part = upper_32_bits(addr);
5414 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
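		/* Video (YUV) formats carry luma in plane 0 and chroma in plane 1. */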
5415 		uint64_t luma_addr = afb->address + fb->offsets[0];
5416 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5417 
5418 		plane_size->surface_size.x = 0;
5419 		plane_size->surface_size.y = 0;
5420 		plane_size->surface_size.width = fb->width;
5421 		plane_size->surface_size.height = fb->height;
5422 		plane_size->surface_pitch =
5423 			fb->pitches[0] / fb->format->cpp[0];
5424 
5425 		plane_size->chroma_size.x = 0;
5426 		plane_size->chroma_size.y = 0;
5427 		/* TODO: set these based on surface format */
5428 		plane_size->chroma_size.width = fb->width / 2;
5429 		plane_size->chroma_size.height = fb->height / 2;
5430 
5431 		plane_size->chroma_pitch =
5432 			fb->pitches[1] / fb->format->cpp[1];
5433 
5434 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5435 		address->video_progressive.luma_addr.low_part =
5436 			lower_32_bits(luma_addr);
5437 		address->video_progressive.luma_addr.high_part =
5438 			upper_32_bits(luma_addr);
5439 		address->video_progressive.chroma_addr.low_part =
5440 			lower_32_bits(chroma_addr);
5441 		address->video_progressive.chroma_addr.high_part =
5442 			upper_32_bits(chroma_addr);
5443 	}
5444 
5445 	if (adev->family >= AMDGPU_FAMILY_AI) {
5446 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5447 								rotation, plane_size,
5448 								tiling_info, dcc,
5449 								address,
5450 								force_disable_dcc);
5451 		if (ret)
5452 			return ret;
5453 	} else {
5454 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5455 	}
5456 
5457 	return 0;
5458 }
5459 
5460 static void
5461 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5462 			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5463 			       bool *global_alpha, int *global_alpha_value)
5464 {
5465 	*per_pixel_alpha = false;
5466 	*pre_multiplied_alpha = true;
5467 	*global_alpha = false;
5468 	*global_alpha_value = 0xff;
5469 
5470 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5471 		return;
5472 
5473 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5474 		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5475 		static const uint32_t alpha_formats[] = {
5476 			DRM_FORMAT_ARGB8888,
5477 			DRM_FORMAT_RGBA8888,
5478 			DRM_FORMAT_ABGR8888,
5479 		};
5480 		uint32_t format = plane_state->fb->format->format;
5481 		unsigned int i;
5482 
5483 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5484 			if (format == alpha_formats[i]) {
5485 				*per_pixel_alpha = true;
5486 				break;
5487 			}
5488 		}
5489 
5490 		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5491 			*pre_multiplied_alpha = false;
5492 	}
5493 
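	/* DRM exposes plane alpha as a 16-bit value; DC expects an 8-bit global alpha. */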
5494 	if (plane_state->alpha < 0xffff) {
5495 		*global_alpha = true;
5496 		*global_alpha_value = plane_state->alpha >> 8;
5497 	}
5498 }
5499 
5500 static int
5501 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5502 			    const enum surface_pixel_format format,
5503 			    enum dc_color_space *color_space)
5504 {
5505 	bool full_range;
5506 
5507 	*color_space = COLOR_SPACE_SRGB;
5508 
5509 	/* DRM color properties only affect non-RGB formats. */
5510 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5511 		return 0;
5512 
5513 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5514 
5515 	switch (plane_state->color_encoding) {
5516 	case DRM_COLOR_YCBCR_BT601:
5517 		if (full_range)
5518 			*color_space = COLOR_SPACE_YCBCR601;
5519 		else
5520 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5521 		break;
5522 
5523 	case DRM_COLOR_YCBCR_BT709:
5524 		if (full_range)
5525 			*color_space = COLOR_SPACE_YCBCR709;
5526 		else
5527 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5528 		break;
5529 
5530 	case DRM_COLOR_YCBCR_BT2020:
5531 		if (full_range)
5532 			*color_space = COLOR_SPACE_2020_YCBCR;
5533 		else
5534 			return -EINVAL;
5535 		break;
5536 
5537 	default:
5538 		return -EINVAL;
5539 	}
5540 
5541 	return 0;
5542 }
5543 
5544 static int
5545 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5546 			    const struct drm_plane_state *plane_state,
5547 			    const uint64_t tiling_flags,
5548 			    struct dc_plane_info *plane_info,
5549 			    struct dc_plane_address *address,
5550 			    bool tmz_surface,
5551 			    bool force_disable_dcc)
5552 {
5553 	const struct drm_framebuffer *fb = plane_state->fb;
5554 	const struct amdgpu_framebuffer *afb =
5555 		to_amdgpu_framebuffer(plane_state->fb);
5556 	int ret;
5557 
5558 	memset(plane_info, 0, sizeof(*plane_info));
5559 
5560 	switch (fb->format->format) {
5561 	case DRM_FORMAT_C8:
5562 		plane_info->format =
5563 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5564 		break;
5565 	case DRM_FORMAT_RGB565:
5566 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5567 		break;
5568 	case DRM_FORMAT_XRGB8888:
5569 	case DRM_FORMAT_ARGB8888:
5570 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5571 		break;
5572 	case DRM_FORMAT_XRGB2101010:
5573 	case DRM_FORMAT_ARGB2101010:
5574 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5575 		break;
5576 	case DRM_FORMAT_XBGR2101010:
5577 	case DRM_FORMAT_ABGR2101010:
5578 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5579 		break;
5580 	case DRM_FORMAT_XBGR8888:
5581 	case DRM_FORMAT_ABGR8888:
5582 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5583 		break;
5584 	case DRM_FORMAT_NV21:
5585 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5586 		break;
5587 	case DRM_FORMAT_NV12:
5588 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5589 		break;
5590 	case DRM_FORMAT_P010:
5591 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5592 		break;
5593 	case DRM_FORMAT_XRGB16161616F:
5594 	case DRM_FORMAT_ARGB16161616F:
5595 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5596 		break;
5597 	case DRM_FORMAT_XBGR16161616F:
5598 	case DRM_FORMAT_ABGR16161616F:
5599 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5600 		break;
5601 	case DRM_FORMAT_XRGB16161616:
5602 	case DRM_FORMAT_ARGB16161616:
5603 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5604 		break;
5605 	case DRM_FORMAT_XBGR16161616:
5606 	case DRM_FORMAT_ABGR16161616:
5607 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5608 		break;
5609 	default:
5610 		DRM_ERROR(
5611 			"Unsupported screen format %p4cc\n",
5612 			&fb->format->format);
5613 		return -EINVAL;
5614 	}
5615 
5616 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5617 	case DRM_MODE_ROTATE_0:
5618 		plane_info->rotation = ROTATION_ANGLE_0;
5619 		break;
5620 	case DRM_MODE_ROTATE_90:
5621 		plane_info->rotation = ROTATION_ANGLE_90;
5622 		break;
5623 	case DRM_MODE_ROTATE_180:
5624 		plane_info->rotation = ROTATION_ANGLE_180;
5625 		break;
5626 	case DRM_MODE_ROTATE_270:
5627 		plane_info->rotation = ROTATION_ANGLE_270;
5628 		break;
5629 	default:
5630 		plane_info->rotation = ROTATION_ANGLE_0;
5631 		break;
5632 	}
5633 
5634 	plane_info->visible = true;
5635 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5636 
5637 	plane_info->layer_index = 0;
5638 
5639 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5640 					  &plane_info->color_space);
5641 	if (ret)
5642 		return ret;
5643 
5644 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5645 					   plane_info->rotation, tiling_flags,
5646 					   &plane_info->tiling_info,
5647 					   &plane_info->plane_size,
5648 					   &plane_info->dcc, address, tmz_surface,
5649 					   force_disable_dcc);
5650 	if (ret)
5651 		return ret;
5652 
5653 	fill_blending_from_plane_state(
5654 		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5655 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5656 
5657 	return 0;
5658 }
5659 
5660 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5661 				    struct dc_plane_state *dc_plane_state,
5662 				    struct drm_plane_state *plane_state,
5663 				    struct drm_crtc_state *crtc_state)
5664 {
5665 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5666 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5667 	struct dc_scaling_info scaling_info;
5668 	struct dc_plane_info plane_info;
5669 	int ret;
5670 	bool force_disable_dcc = false;
5671 
5672 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5673 	if (ret)
5674 		return ret;
5675 
5676 	dc_plane_state->src_rect = scaling_info.src_rect;
5677 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5678 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5679 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5680 
5681 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5682 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5683 					  afb->tiling_flags,
5684 					  &plane_info,
5685 					  &dc_plane_state->address,
5686 					  afb->tmz_surface,
5687 					  force_disable_dcc);
5688 	if (ret)
5689 		return ret;
5690 
5691 	dc_plane_state->format = plane_info.format;
5692 	dc_plane_state->color_space = plane_info.color_space;
5694 	dc_plane_state->plane_size = plane_info.plane_size;
5695 	dc_plane_state->rotation = plane_info.rotation;
5696 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5697 	dc_plane_state->stereo_format = plane_info.stereo_format;
5698 	dc_plane_state->tiling_info = plane_info.tiling_info;
5699 	dc_plane_state->visible = plane_info.visible;
5700 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5701 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5702 	dc_plane_state->global_alpha = plane_info.global_alpha;
5703 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5704 	dc_plane_state->dcc = plane_info.dcc;
5705 	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
5706 	dc_plane_state->flip_int_enabled = true;
5707 
5708 	/*
5709 	 * Always set input transfer function, since plane state is refreshed
5710 	 * every time.
5711 	 */
5712 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5713 	if (ret)
5714 		return ret;
5715 
5716 	return 0;
5717 }
5718 
5719 /**
5720  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5721  *
5722  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5723  *         remote fb
5724  * @old_plane_state: Old state of @plane
5725  * @new_plane_state: New state of @plane
5726  * @crtc_state: New state of CRTC connected to the @plane
5727  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5728  *
5729  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5730  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5731  * the eDP remote buffer. The responsibility of specifying the dirty regions is
5732  * amdgpu_dm's.
5733  *
5734  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5735  * plane with regions that require flushing to the eDP remote buffer. In
5736  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5737  * implicitly provide damage clips without any client support via the plane
5738  * bounds.
5739  *
5740  * Today, amdgpu_dm only supports the MPO and cursor use cases.
5741  *
5742  * TODO: Also enable for FB_DAMAGE_CLIPS
5743  */
5744 static void fill_dc_dirty_rects(struct drm_plane *plane,
5745 				struct drm_plane_state *old_plane_state,
5746 				struct drm_plane_state *new_plane_state,
5747 				struct drm_crtc_state *crtc_state,
5748 				struct dc_flip_addrs *flip_addrs)
5749 {
5750 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5751 	struct rect *dirty_rects = flip_addrs->dirty_rects;
5752 	uint32_t num_clips;
5753 	bool bb_changed;
5754 	bool fb_changed;
5755 	uint32_t i = 0;
5756 
5757 	flip_addrs->dirty_rect_count = 0;
5758 
5759 	/*
5760 	 * Cursor plane has its own dirty rect update interface. See
5761 	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5762 	 */
5763 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
5764 		return;
5765 
5766 	/*
5767 	 * Today, we only consider the MPO use case for PSR SU. If MPO is not
5768 	 * requested and there is a plane update, do a full-frame update (FFU).
5769 	 */
5770 	if (!dm_crtc_state->mpo_requested) {
5771 		dirty_rects[0].x = 0;
5772 		dirty_rects[0].y = 0;
5773 		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5774 		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5775 		flip_addrs->dirty_rect_count = 1;
5776 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5777 				 new_plane_state->plane->base.id,
5778 				 dm_crtc_state->base.mode.crtc_hdisplay,
5779 				 dm_crtc_state->base.mode.crtc_vdisplay);
5780 		return;
5781 	}
5782 
5783 	/*
5784 	 * MPO is requested. Add entire plane bounding box to dirty rects if
5785 	 * flipped to or damaged.
5786 	 *
5787 	 * If plane is moved or resized, also add old bounding box to dirty
5788 	 * rects.
5789 	 */
5790 	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5791 	fb_changed = old_plane_state->fb->base.id !=
5792 		     new_plane_state->fb->base.id;
5793 	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5794 		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
5795 		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
5796 		      old_plane_state->crtc_h != new_plane_state->crtc_h);
5797 
5798 	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5799 			 new_plane_state->plane->base.id,
5800 			 bb_changed, fb_changed, num_clips);
5801 
5802 	if (num_clips || fb_changed || bb_changed) {
5803 		dirty_rects[i].x = new_plane_state->crtc_x;
5804 		dirty_rects[i].y = new_plane_state->crtc_y;
5805 		dirty_rects[i].width = new_plane_state->crtc_w;
5806 		dirty_rects[i].height = new_plane_state->crtc_h;
5807 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5808 				 new_plane_state->plane->base.id,
5809 				 dirty_rects[i].x, dirty_rects[i].y,
5810 				 dirty_rects[i].width, dirty_rects[i].height);
5811 		i += 1;
5812 	}
5813 
5814 	/* Add old plane bounding-box if plane is moved or resized */
5815 	if (bb_changed) {
5816 		dirty_rects[i].x = old_plane_state->crtc_x;
5817 		dirty_rects[i].y = old_plane_state->crtc_y;
5818 		dirty_rects[i].width = old_plane_state->crtc_w;
5819 		dirty_rects[i].height = old_plane_state->crtc_h;
5820 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5821 				old_plane_state->plane->base.id,
5822 				dirty_rects[i].x, dirty_rects[i].y,
5823 				dirty_rects[i].width, dirty_rects[i].height);
5824 		i += 1;
5825 	}
5826 
5827 	flip_addrs->dirty_rect_count = i;
5828 }
5829 
5830 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5831 					   const struct dm_connector_state *dm_state,
5832 					   struct dc_stream_state *stream)
5833 {
5834 	enum amdgpu_rmx_type rmx_type;
5835 
5836 	struct rect src = { 0 }; /* viewport in composition space */
5837 	struct rect dst = { 0 }; /* stream addressable area */
5838 
5839 	/* no mode. nothing to be done */
5840 	if (!mode)
5841 		return;
5842 
5843 	/* Full screen scaling by default */
5844 	src.width = mode->hdisplay;
5845 	src.height = mode->vdisplay;
5846 	dst.width = stream->timing.h_addressable;
5847 	dst.height = stream->timing.v_addressable;
5848 
5849 	if (dm_state) {
5850 		rmx_type = dm_state->scaling;
5851 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5852 			if (src.width * dst.height <
5853 					src.height * dst.width) {
5854 				/* height needs less upscaling/more downscaling */
5855 				dst.width = src.width *
5856 						dst.height / src.height;
5857 			} else {
5858 				/* width needs less upscaling/more downscaling */
5859 				dst.height = src.height *
5860 						dst.width / src.width;
5861 			}
5862 		} else if (rmx_type == RMX_CENTER) {
5863 			dst = src;
5864 		}
5865 
5866 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5867 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5868 
5869 		if (dm_state->underscan_enable) {
5870 			dst.x += dm_state->underscan_hborder / 2;
5871 			dst.y += dm_state->underscan_vborder / 2;
5872 			dst.width -= dm_state->underscan_hborder;
5873 			dst.height -= dm_state->underscan_vborder;
5874 		}
5875 	}
5876 
5877 	stream->src = src;
5878 	stream->dst = dst;
5879 
5880 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5881 		      dst.x, dst.y, dst.width, dst.height);
5882 
5883 }
5884 
5885 static enum dc_color_depth
5886 convert_color_depth_from_display_info(const struct drm_connector *connector,
5887 				      bool is_y420, int requested_bpc)
5888 {
5889 	uint8_t bpc;
5890 
5891 	if (is_y420) {
5892 		bpc = 8;
5893 
5894 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5895 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5896 			bpc = 16;
5897 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5898 			bpc = 12;
5899 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5900 			bpc = 10;
5901 	} else {
5902 		bpc = (uint8_t)connector->display_info.bpc;
5903 		/* Assume 8 bpc by default if no bpc is specified. */
5904 		bpc = bpc ? bpc : 8;
5905 	}
5906 
5907 	if (requested_bpc > 0) {
5908 		/*
5909 		 * Cap display bpc based on the user requested value.
5910 		 *
5911 		 * The value for state->max_bpc may not be correctly updated
5912 		 * depending on when the connector gets added to the state
5913 		 * or if this was called outside of atomic check, so it
5914 		 * can't be used directly.
5915 		 */
5916 		bpc = min_t(u8, bpc, requested_bpc);
5917 
5918 		/* Round down to the nearest even number. */
5919 		bpc = bpc - (bpc & 1);
5920 	}
5921 
5922 	switch (bpc) {
5923 	case 0:
5924 		/*
5925 		 * Temporary Work around, DRM doesn't parse color depth for
5926 		 * EDID revision before 1.4
5927 		 * TODO: Fix edid parsing
5928 		 */
5929 		return COLOR_DEPTH_888;
5930 	case 6:
5931 		return COLOR_DEPTH_666;
5932 	case 8:
5933 		return COLOR_DEPTH_888;
5934 	case 10:
5935 		return COLOR_DEPTH_101010;
5936 	case 12:
5937 		return COLOR_DEPTH_121212;
5938 	case 14:
5939 		return COLOR_DEPTH_141414;
5940 	case 16:
5941 		return COLOR_DEPTH_161616;
5942 	default:
5943 		return COLOR_DEPTH_UNDEFINED;
5944 	}
5945 }
5946 
5947 static enum dc_aspect_ratio
5948 get_aspect_ratio(const struct drm_display_mode *mode_in)
5949 {
5950 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5951 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5952 }
5953 
5954 static enum dc_color_space
5955 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5956 {
5957 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5958 
5959 	switch (dc_crtc_timing->pixel_encoding)	{
5960 	case PIXEL_ENCODING_YCBCR422:
5961 	case PIXEL_ENCODING_YCBCR444:
5962 	case PIXEL_ENCODING_YCBCR420:
5963 	{
5964 		/*
5965 		 * 27030 kHz is the separation point between HDTV and SDTV
5966 		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5967 		 * respectively.
5968 		 */
5969 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5970 			if (dc_crtc_timing->flags.Y_ONLY)
5971 				color_space =
5972 					COLOR_SPACE_YCBCR709_LIMITED;
5973 			else
5974 				color_space = COLOR_SPACE_YCBCR709;
5975 		} else {
5976 			if (dc_crtc_timing->flags.Y_ONLY)
5977 				color_space =
5978 					COLOR_SPACE_YCBCR601_LIMITED;
5979 			else
5980 				color_space = COLOR_SPACE_YCBCR601;
5981 		}
5982 
5983 	}
5984 	break;
5985 	case PIXEL_ENCODING_RGB:
5986 		color_space = COLOR_SPACE_SRGB;
5987 		break;
5988 
5989 	default:
5990 		WARN_ON(1);
5991 		break;
5992 	}
5993 
5994 	return color_space;
5995 }
5996 
5997 static bool adjust_colour_depth_from_display_info(
5998 	struct dc_crtc_timing *timing_out,
5999 	const struct drm_display_info *info)
6000 {
6001 	enum dc_color_depth depth = timing_out->display_color_depth;
6002 	int normalized_clk;
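
	/* Both normalized_clk (pix_clk_100hz / 10) and info->max_tmds_clock are in kHz. */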
6003 	do {
6004 		normalized_clk = timing_out->pix_clk_100hz / 10;
6005 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
6006 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
6007 			normalized_clk /= 2;
6008 		/* Adjust the pixel clock per the HDMI spec based on colour depth. */
6009 		switch (depth) {
6010 		case COLOR_DEPTH_888:
6011 			break;
6012 		case COLOR_DEPTH_101010:
6013 			normalized_clk = (normalized_clk * 30) / 24;
6014 			break;
6015 		case COLOR_DEPTH_121212:
6016 			normalized_clk = (normalized_clk * 36) / 24;
6017 			break;
6018 		case COLOR_DEPTH_161616:
6019 			normalized_clk = (normalized_clk * 48) / 24;
6020 			break;
6021 		default:
6022 			/* The above depths are the only ones valid for HDMI. */
6023 			return false;
6024 		}
6025 		if (normalized_clk <= info->max_tmds_clock) {
6026 			timing_out->display_color_depth = depth;
6027 			return true;
6028 		}
6029 	} while (--depth > COLOR_DEPTH_666);
6030 	return false;
6031 }
6032 
6033 static void fill_stream_properties_from_drm_display_mode(
6034 	struct dc_stream_state *stream,
6035 	const struct drm_display_mode *mode_in,
6036 	const struct drm_connector *connector,
6037 	const struct drm_connector_state *connector_state,
6038 	const struct dc_stream_state *old_stream,
6039 	int requested_bpc)
6040 {
6041 	struct dc_crtc_timing *timing_out = &stream->timing;
6042 	const struct drm_display_info *info = &connector->display_info;
6043 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6044 	struct hdmi_vendor_infoframe hv_frame;
6045 	struct hdmi_avi_infoframe avi_frame;
6046 
6047 	memset(&hv_frame, 0, sizeof(hv_frame));
6048 	memset(&avi_frame, 0, sizeof(avi_frame));
6049 
6050 	timing_out->h_border_left = 0;
6051 	timing_out->h_border_right = 0;
6052 	timing_out->v_border_top = 0;
6053 	timing_out->v_border_bottom = 0;
6054 	/* TODO: un-hardcode */
6055 	if (drm_mode_is_420_only(info, mode_in)
6056 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6057 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6058 	else if (drm_mode_is_420_also(info, mode_in)
6059 			&& aconnector->force_yuv420_output)
6060 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6061 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
6062 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6063 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6064 	else
6065 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6066 
6067 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6068 	timing_out->display_color_depth = convert_color_depth_from_display_info(
6069 		connector,
6070 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6071 		requested_bpc);
6072 	timing_out->scan_type = SCANNING_TYPE_NODATA;
6073 	timing_out->hdmi_vic = 0;
6074 
6075 	if (old_stream) {
6076 		timing_out->vic = old_stream->timing.vic;
6077 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6078 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6079 	} else {
6080 		timing_out->vic = drm_match_cea_mode(mode_in);
6081 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6082 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6083 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6084 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6085 	}
6086 
6087 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6088 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6089 		timing_out->vic = avi_frame.video_code;
6090 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6091 		timing_out->hdmi_vic = hv_frame.vic;
6092 	}
6093 
6094 	if (is_freesync_video_mode(mode_in, aconnector)) {
6095 		timing_out->h_addressable = mode_in->hdisplay;
6096 		timing_out->h_total = mode_in->htotal;
6097 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6098 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6099 		timing_out->v_total = mode_in->vtotal;
6100 		timing_out->v_addressable = mode_in->vdisplay;
6101 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6102 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6103 		timing_out->pix_clk_100hz = mode_in->clock * 10;
6104 	} else {
6105 		timing_out->h_addressable = mode_in->crtc_hdisplay;
6106 		timing_out->h_total = mode_in->crtc_htotal;
6107 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6108 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6109 		timing_out->v_total = mode_in->crtc_vtotal;
6110 		timing_out->v_addressable = mode_in->crtc_vdisplay;
6111 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6112 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6113 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6114 	}
6115 
6116 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6117 
6118 	stream->output_color_space = get_output_color_space(timing_out);
6119 
6120 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6121 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6122 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6123 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6124 		    drm_mode_is_420_also(info, mode_in) &&
6125 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6126 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6127 			adjust_colour_depth_from_display_info(timing_out, info);
6128 		}
6129 	}
6130 }
6131 
6132 static void fill_audio_info(struct audio_info *audio_info,
6133 			    const struct drm_connector *drm_connector,
6134 			    const struct dc_sink *dc_sink)
6135 {
6136 	int i = 0;
6137 	int cea_revision = 0;
6138 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6139 
6140 	audio_info->manufacture_id = edid_caps->manufacturer_id;
6141 	audio_info->product_id = edid_caps->product_id;
6142 
6143 	cea_revision = drm_connector->display_info.cea_rev;
6144 
6145 	strscpy(audio_info->display_name,
6146 		edid_caps->display_name,
6147 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6148 
6149 	if (cea_revision >= 3) {
6150 		audio_info->mode_count = edid_caps->audio_mode_count;
6151 
6152 		for (i = 0; i < audio_info->mode_count; ++i) {
6153 			audio_info->modes[i].format_code =
6154 					(enum audio_format_code)
6155 					(edid_caps->audio_modes[i].format_code);
6156 			audio_info->modes[i].channel_count =
6157 					edid_caps->audio_modes[i].channel_count;
6158 			audio_info->modes[i].sample_rates.all =
6159 					edid_caps->audio_modes[i].sample_rate;
6160 			audio_info->modes[i].sample_size =
6161 					edid_caps->audio_modes[i].sample_size;
6162 		}
6163 	}
6164 
6165 	audio_info->flags.all = edid_caps->speaker_flags;
6166 
6167 	/* TODO: We only check the progressive mode; check the interlaced mode too */
6168 	if (drm_connector->latency_present[0]) {
6169 		audio_info->video_latency = drm_connector->video_latency[0];
6170 		audio_info->audio_latency = drm_connector->audio_latency[0];
6171 	}
6172 
6173 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6174 
6175 }
6176 
6177 static void
6178 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6179 				      struct drm_display_mode *dst_mode)
6180 {
6181 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6182 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6183 	dst_mode->crtc_clock = src_mode->crtc_clock;
6184 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6185 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6186 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6187 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6188 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6189 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6190 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6191 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6192 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6193 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6194 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6195 }
6196 
6197 static void
6198 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6199 					const struct drm_display_mode *native_mode,
6200 					bool scale_enabled)
6201 {
6202 	if (scale_enabled) {
6203 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6204 	} else if (native_mode->clock == drm_mode->clock &&
6205 			native_mode->htotal == drm_mode->htotal &&
6206 			native_mode->vtotal == drm_mode->vtotal) {
6207 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6208 	} else {
6209 		/* no scaling and no amdgpu-inserted mode, nothing to patch */
6210 	}
6211 }
6212 
6213 static struct dc_sink *
6214 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6215 {
6216 	struct dc_sink_init_data sink_init_data = { 0 };
6217 	struct dc_sink *sink = NULL;
6218 	sink_init_data.link = aconnector->dc_link;
6219 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6220 
6221 	sink = dc_sink_create(&sink_init_data);
6222 	if (!sink) {
6223 		DRM_ERROR("Failed to create sink!\n");
6224 		return NULL;
6225 	}
6226 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6227 
6228 	return sink;
6229 }
6230 
6231 static void set_multisync_trigger_params(
6232 		struct dc_stream_state *stream)
6233 {
6234 	struct dc_stream_state *master = NULL;
6235 
6236 	if (stream->triggered_crtc_reset.enabled) {
6237 		master = stream->triggered_crtc_reset.event_source;
6238 		stream->triggered_crtc_reset.event =
6239 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6240 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6241 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6242 	}
6243 }
6244 
6245 static void set_master_stream(struct dc_stream_state *stream_set[],
6246 			      int stream_count)
6247 {
6248 	int j, highest_rfr = 0, master_stream = 0;
6249 
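	/*
	 * The stream with the highest refresh rate becomes the event source
	 * ("master") for every other stream's triggered CRTC reset.
	 */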
6250 	for (j = 0;  j < stream_count; j++) {
6251 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6252 			int refresh_rate = 0;
6253 
6254 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6255 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6256 			if (refresh_rate > highest_rfr) {
6257 				highest_rfr = refresh_rate;
6258 				master_stream = j;
6259 			}
6260 		}
6261 	}
6262 	for (j = 0;  j < stream_count; j++) {
6263 		if (stream_set[j])
6264 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6265 	}
6266 }
6267 
6268 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6269 {
6270 	int i = 0;
6271 	struct dc_stream_state *stream;
6272 
6273 	if (context->stream_count < 2)
6274 		return;
6275 	for (i = 0; i < context->stream_count ; i++) {
6276 		if (!context->streams[i])
6277 			continue;
6278 		/*
6279 		 * TODO: add a function to read AMD VSDB bits and set
6280 		 * crtc_sync_master.multi_sync_enabled flag
6281 		 * For now it's set to false
6282 		 */
6283 	}
6284 
6285 	set_master_stream(context->streams, context->stream_count);
6286 
6287 	for (i = 0; i < context->stream_count ; i++) {
6288 		stream = context->streams[i];
6289 
6290 		if (!stream)
6291 			continue;
6292 
6293 		set_multisync_trigger_params(stream);
6294 	}
6295 }
6296 
6297 #if defined(CONFIG_DRM_AMD_DC_DCN)
6298 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6299 							struct dc_sink *sink, struct dc_stream_state *stream,
6300 							struct dsc_dec_dpcd_caps *dsc_caps)
6301 {
6302 	stream->timing.flags.DSC = 0;
6303 	dsc_caps->is_dsc_supported = false;
6304 
6305 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6306 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6307 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6308 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6309 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6310 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6311 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6312 				dsc_caps);
6313 	}
6314 }
6315 
6316 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6317 				    struct dc_sink *sink, struct dc_stream_state *stream,
6318 				    struct dsc_dec_dpcd_caps *dsc_caps,
6319 				    uint32_t max_dsc_target_bpp_limit_override)
6320 {
6321 	const struct dc_link_settings *verified_link_cap = NULL;
6322 	uint32_t link_bw_in_kbps;
6323 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6324 	struct dc *dc = sink->ctx->dc;
6325 	struct dc_dsc_bw_range bw_range = {0};
6326 	struct dc_dsc_config dsc_cfg = {0};
6327 
6328 	verified_link_cap = dc_link_get_link_cap(stream->link);
6329 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
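	/* DSC bpp values are in 1/16 bpp fixed-point units (x16); start from 8 bpp. */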
6330 	edp_min_bpp_x16 = 8 * 16;
6331 	edp_max_bpp_x16 = 8 * 16;
6332 
6333 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6334 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6335 
6336 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6337 		edp_min_bpp_x16 = edp_max_bpp_x16;
6338 
6339 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6340 				dc->debug.dsc_min_slice_height_override,
6341 				edp_min_bpp_x16, edp_max_bpp_x16,
6342 				dsc_caps,
6343 				&stream->timing,
6344 				&bw_range)) {
6345 
6346 		if (bw_range.max_kbps < link_bw_in_kbps) {
6347 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6348 					dsc_caps,
6349 					dc->debug.dsc_min_slice_height_override,
6350 					max_dsc_target_bpp_limit_override,
6351 					0,
6352 					&stream->timing,
6353 					&dsc_cfg)) {
6354 				stream->timing.dsc_cfg = dsc_cfg;
6355 				stream->timing.flags.DSC = 1;
6356 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6357 			}
6358 			return;
6359 		}
6360 	}
6361 
6362 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6363 				dsc_caps,
6364 				dc->debug.dsc_min_slice_height_override,
6365 				max_dsc_target_bpp_limit_override,
6366 				link_bw_in_kbps,
6367 				&stream->timing,
6368 				&dsc_cfg)) {
6369 		stream->timing.dsc_cfg = dsc_cfg;
6370 		stream->timing.flags.DSC = 1;
6371 	}
6372 }
6373 
6374 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6375 										struct dc_sink *sink, struct dc_stream_state *stream,
6376 										struct dsc_dec_dpcd_caps *dsc_caps)
6377 {
6378 	struct drm_connector *drm_connector = &aconnector->base;
6379 	uint32_t link_bandwidth_kbps;
6380 	uint32_t max_dsc_target_bpp_limit_override = 0;
6381 	struct dc *dc = sink->ctx->dc;
6382 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6383 	uint32_t dsc_max_supported_bw_in_kbps;
6384 
6385 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6386 							dc_link_get_link_cap(aconnector->dc_link));
6387 
6388 	if (stream->link && stream->link->local_sink)
6389 		max_dsc_target_bpp_limit_override =
6390 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6391 
6392 	/* Set DSC policy according to dsc_clock_en */
6393 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6394 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6395 
6396 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6397 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6398 
6399 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6400 
6401 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6402 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6403 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6404 						dsc_caps,
6405 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6406 						max_dsc_target_bpp_limit_override,
6407 						link_bandwidth_kbps,
6408 						&stream->timing,
6409 						&stream->timing.dsc_cfg)) {
6410 				stream->timing.flags.DSC = 1;
6411 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6412 								 __func__, drm_connector->name);
6413 			}
6414 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6415 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6416 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6417 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6418 
6419 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6420 					max_supported_bw_in_kbps > 0 &&
6421 					dsc_max_supported_bw_in_kbps > 0)
6422 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6423 						dsc_caps,
6424 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6425 						max_dsc_target_bpp_limit_override,
6426 						dsc_max_supported_bw_in_kbps,
6427 						&stream->timing,
6428 						&stream->timing.dsc_cfg)) {
6429 					stream->timing.flags.DSC = 1;
6430 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6431 									 __func__, drm_connector->name);
6432 				}
6433 		}
6434 	}
6435 
6436 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6437 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6438 		stream->timing.flags.DSC = 1;
6439 
6440 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6441 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6442 
6443 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6444 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6445 
6446 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6447 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6448 }
6449 #endif /* CONFIG_DRM_AMD_DC_DCN */
6450 
6451 /**
6452  * DOC: FreeSync Video
6453  *
6454  * When a userspace application wants to play a video, the content follows a
6455  * standard format definition that usually specifies the FPS for that format.
6456  * The list below illustrates some video formats and their expected FPS:
6458  *
6459  * - TV/NTSC (23.976 FPS)
6460  * - Cinema (24 FPS)
6461  * - TV/PAL (25 FPS)
6462  * - TV/NTSC (29.97 FPS)
6463  * - TV/NTSC (30 FPS)
6464  * - Cinema HFR (48 FPS)
6465  * - TV/PAL (50 FPS)
6466  * - Commonly used (60 FPS)
6467  * - Multiples of 24 (48,72,96,120 FPS)
6468  *
6469  * The list of standard video formats is not huge and can be added to the
6470  * connector modeset list beforehand. With that, userspace can leverage
6471  * FreeSync to extend the front porch in order to attain the target refresh
6472  * rate. Such a switch will happen seamlessly, without screen blanking or
6473  * reprogramming of the output in any other way. If userspace requests a
6474  * modesetting change compatible with FreeSync modes that only differ in the
6475  * refresh rate, DC will skip the full update and avoid blinking during the
6476  * transition. For example, the video player can change the modesetting from
6477  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6478  * causing any display blink. This same concept can be applied to a mode
6479  * setting change.
6480  */
6481 static struct drm_display_mode *
6482 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6483 			  bool use_probed_modes)
6484 {
6485 	struct drm_display_mode *m, *m_pref = NULL;
6486 	u16 current_refresh, highest_refresh;
6487 	struct list_head *list_head = use_probed_modes ?
6488 						    &aconnector->base.probed_modes :
6489 						    &aconnector->base.modes;
6490 
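	/* Reuse the cached base mode if it was already computed for this connector. */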
6491 	if (aconnector->freesync_vid_base.clock != 0)
6492 		return &aconnector->freesync_vid_base;
6493 
6494 	/* Find the preferred mode */
6495 	list_for_each_entry (m, list_head, head) {
6496 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6497 			m_pref = m;
6498 			break;
6499 		}
6500 	}
6501 
6502 	if (!m_pref) {
6503 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6504 		m_pref = list_first_entry_or_null(
6505 			&aconnector->base.modes, struct drm_display_mode, head);
6506 		if (!m_pref) {
6507 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6508 			return NULL;
6509 		}
6510 	}
6511 
6512 	highest_refresh = drm_mode_vrefresh(m_pref);
6513 
6514 	/*
6515 	 * Find the mode with highest refresh rate with same resolution.
6516 	 * For some monitors, preferred mode is not the mode with highest
6517 	 * supported refresh rate.
6518 	 */
6519 	list_for_each_entry (m, list_head, head) {
6520 		current_refresh  = drm_mode_vrefresh(m);
6521 
6522 		if (m->hdisplay == m_pref->hdisplay &&
6523 		    m->vdisplay == m_pref->vdisplay &&
6524 		    highest_refresh < current_refresh) {
6525 			highest_refresh = current_refresh;
6526 			m_pref = m;
6527 		}
6528 	}
6529 
6530 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6531 	return m_pref;
6532 }
6533 
6534 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6535 				   struct amdgpu_dm_connector *aconnector)
6536 {
6537 	struct drm_display_mode *high_mode;
6538 	int timing_diff;
6539 
6540 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6541 	if (!high_mode || !mode)
6542 		return false;
6543 
6544 	timing_diff = high_mode->vtotal - mode->vtotal;
6545 
6546 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6547 	    high_mode->hdisplay != mode->hdisplay ||
6548 	    high_mode->vdisplay != mode->vdisplay ||
6549 	    high_mode->hsync_start != mode->hsync_start ||
6550 	    high_mode->hsync_end != mode->hsync_end ||
6551 	    high_mode->htotal != mode->htotal ||
6552 	    high_mode->hskew != mode->hskew ||
6553 	    high_mode->vscan != mode->vscan ||
6554 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6555 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6556 		return false;
6557 	else
6558 		return true;
6559 }
6560 
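/*
 * Build a dc_stream_state for the given connector and mode: pick the sink
 * (or create a fake one when the connector has no dc_sink yet), derive the
 * CRTC timing either from the FreeSync base mode or from the preferred
 * mode, and fill in the stream, audio and, where supported, DSC and PSR/VSC
 * SDP properties.
 */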
6561 static struct dc_stream_state *
6562 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6563 		       const struct drm_display_mode *drm_mode,
6564 		       const struct dm_connector_state *dm_state,
6565 		       const struct dc_stream_state *old_stream,
6566 		       int requested_bpc)
6567 {
6568 	struct drm_display_mode *preferred_mode = NULL;
6569 	struct drm_connector *drm_connector;
6570 	const struct drm_connector_state *con_state =
6571 		dm_state ? &dm_state->base : NULL;
6572 	struct dc_stream_state *stream = NULL;
6573 	struct drm_display_mode mode = *drm_mode;
6574 	struct drm_display_mode saved_mode;
6575 	struct drm_display_mode *freesync_mode = NULL;
6576 	bool native_mode_found = false;
6577 	bool recalculate_timing = false;
6578 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6579 	int mode_refresh;
6580 	int preferred_refresh = 0;
6581 #if defined(CONFIG_DRM_AMD_DC_DCN)
6582 	struct dsc_dec_dpcd_caps dsc_caps;
6583 #endif
6584 	struct dc_sink *sink = NULL;
6585 
6586 	memset(&saved_mode, 0, sizeof(saved_mode));
6587 
6588 	if (aconnector == NULL) {
6589 		DRM_ERROR("aconnector is NULL!\n");
6590 		return stream;
6591 	}
6592 
6593 	drm_connector = &aconnector->base;
6594 
6595 	if (!aconnector->dc_sink) {
6596 		sink = create_fake_sink(aconnector);
6597 		if (!sink)
6598 			return stream;
6599 	} else {
6600 		sink = aconnector->dc_sink;
6601 		dc_sink_retain(sink);
6602 	}
6603 
6604 	stream = dc_create_stream_for_sink(sink);
6605 
6606 	if (stream == NULL) {
6607 		DRM_ERROR("Failed to create stream for sink!\n");
6608 		goto finish;
6609 	}
6610 
6611 	stream->dm_stream_context = aconnector;
6612 
6613 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6614 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6615 
6616 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6617 		/* Search for preferred mode */
6618 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6619 			native_mode_found = true;
6620 			break;
6621 		}
6622 	}
6623 	if (!native_mode_found)
6624 		preferred_mode = list_first_entry_or_null(
6625 				&aconnector->base.modes,
6626 				struct drm_display_mode,
6627 				head);
6628 
6629 	mode_refresh = drm_mode_vrefresh(&mode);
6630 
6631 	if (preferred_mode == NULL) {
		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set a mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be filled in time.
		 */
6638 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6639 	} else {
6640 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6641 		if (recalculate_timing) {
6642 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6643 			drm_mode_copy(&saved_mode, &mode);
6644 			drm_mode_copy(&mode, freesync_mode);
6645 		} else {
6646 			decide_crtc_timing_for_drm_display_mode(
6647 				&mode, preferred_mode, scale);
6648 
6649 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6650 		}
6651 	}
6652 
6653 	if (recalculate_timing)
6654 		drm_mode_set_crtcinfo(&saved_mode, 0);
6655 	else if (!dm_state)
6656 		drm_mode_set_crtcinfo(&mode, 0);
6657 
	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * copy the VIC and polarities from the old timings.
	 */
6662 	if (!scale || mode_refresh != preferred_refresh)
6663 		fill_stream_properties_from_drm_display_mode(
6664 			stream, &mode, &aconnector->base, con_state, NULL,
6665 			requested_bpc);
6666 	else
6667 		fill_stream_properties_from_drm_display_mode(
6668 			stream, &mode, &aconnector->base, con_state, old_stream,
6669 			requested_bpc);
6670 
6671 #if defined(CONFIG_DRM_AMD_DC_DCN)
6672 	/* SST DSC determination policy */
6673 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6674 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6675 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6676 #endif
6677 
6678 	update_stream_scaling_settings(&mode, dm_state, stream);
6679 
6680 	fill_audio_info(
6681 		&stream->audio_info,
6682 		drm_connector,
6683 		sink);
6684 
6685 	update_stream_signal(stream, sink);
6686 
6687 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6688 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6689 
6690 	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports the VSC SDP colorimetry
		 * capability before building the VSC infopacket.
		 */
6695 		stream->use_vsc_sdp_for_colorimetry = false;
6696 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6697 			stream->use_vsc_sdp_for_colorimetry =
6698 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6699 		} else {
6700 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6701 				stream->use_vsc_sdp_for_colorimetry = true;
6702 		}
6703 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6704 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6705 
6706 	}
6707 finish:
6708 	dc_sink_release(sink);
6709 
6710 	return stream;
6711 }
6712 
6713 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6714 {
6715 	drm_crtc_cleanup(crtc);
6716 	kfree(crtc);
6717 }
6718 
6719 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6720 				  struct drm_crtc_state *state)
6721 {
6722 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6723 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6725 	if (cur->stream)
6726 		dc_stream_release(cur->stream);
6727 
6729 	__drm_atomic_helper_crtc_destroy_state(state);
6730 
6732 	kfree(state);
6733 }
6734 
6735 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6736 {
6737 	struct dm_crtc_state *state;
6738 
6739 	if (crtc->state)
6740 		dm_crtc_destroy_state(crtc, crtc->state);
6741 
6742 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6743 	if (WARN_ON(!state))
6744 		return;
6745 
6746 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6747 }
6748 
6749 static struct drm_crtc_state *
6750 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6751 {
6752 	struct dm_crtc_state *state, *cur;
6753 
6754 	cur = to_dm_crtc_state(crtc->state);
6755 
6756 	if (WARN_ON(!crtc->state))
6757 		return NULL;
6758 
6759 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6760 	if (!state)
6761 		return NULL;
6762 
6763 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6764 
6765 	if (cur->stream) {
6766 		state->stream = cur->stream;
6767 		dc_stream_retain(state->stream);
6768 	}
6769 
6770 	state->active_planes = cur->active_planes;
6771 	state->vrr_infopacket = cur->vrr_infopacket;
6772 	state->abm_level = cur->abm_level;
6773 	state->vrr_supported = cur->vrr_supported;
6774 	state->freesync_config = cur->freesync_config;
6775 	state->cm_has_degamma = cur->cm_has_degamma;
6776 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6777 	state->mpo_requested = cur->mpo_requested;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
6779 
6780 	return &state->base;
6781 }
6782 
6783 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6784 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6785 {
6786 	crtc_debugfs_init(crtc);
6787 
6788 	return 0;
6789 }
6790 #endif
6791 
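/*
 * Enable/disable the VUPDATE interrupt for the CRTC's OTG instance. VUPDATE
 * is only needed while VRR is active (see dm_set_vblank() below).
 */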
6792 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6793 {
6794 	enum dc_irq_source irq_source;
6795 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6796 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6797 	int rc;
6798 
6799 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6800 
6801 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6802 
6803 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6804 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6805 	return rc;
6806 }
6807 
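/*
 * Pair the VBLANK interrupt with the VUPDATE interrupt: VUPDATE is enabled
 * together with VBLANK only when VRR is active and is always disabled when
 * VBLANK is turned off. Unless a GPU reset is in progress, the remaining
 * per-stream work is queued on the vblank control workqueue so it runs in
 * process context.
 */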
6808 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6809 {
6810 	enum dc_irq_source irq_source;
6811 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6812 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6813 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6814 	struct amdgpu_display_manager *dm = &adev->dm;
6815 	struct vblank_control_work *work;
6816 	int rc = 0;
6817 
6818 	if (enable) {
6819 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6820 		if (amdgpu_dm_vrr_active(acrtc_state))
6821 			rc = dm_set_vupdate_irq(crtc, true);
6822 	} else {
6823 		/* vblank irq off -> vupdate irq off */
6824 		rc = dm_set_vupdate_irq(crtc, false);
6825 	}
6826 
6827 	if (rc)
6828 		return rc;
6829 
6830 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6831 
6832 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6833 		return -EBUSY;
6834 
6835 	if (amdgpu_in_reset(adev))
6836 		return 0;
6837 
6838 	if (dm->vblank_control_workqueue) {
6839 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6840 		if (!work)
6841 			return -ENOMEM;
6842 
6843 		INIT_WORK(&work->work, vblank_control_worker);
6844 		work->dm = dm;
6845 		work->acrtc = acrtc;
6846 		work->enable = enable;
6847 
6848 		if (acrtc_state->stream) {
6849 			dc_stream_retain(acrtc_state->stream);
6850 			work->stream = acrtc_state->stream;
6851 		}
6852 
6853 		queue_work(dm->vblank_control_workqueue, &work->work);
6854 	}
6855 
6856 	return 0;
6857 }
6858 
6859 static int dm_enable_vblank(struct drm_crtc *crtc)
6860 {
6861 	return dm_set_vblank(crtc, true);
6862 }
6863 
6864 static void dm_disable_vblank(struct drm_crtc *crtc)
6865 {
6866 	dm_set_vblank(crtc, false);
6867 }
6868 
/* Only the options currently available for the driver are implemented */
6870 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6871 	.reset = dm_crtc_reset_state,
6872 	.destroy = amdgpu_dm_crtc_destroy,
6873 	.set_config = drm_atomic_helper_set_config,
6874 	.page_flip = drm_atomic_helper_page_flip,
6875 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6876 	.atomic_destroy_state = dm_crtc_destroy_state,
6877 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6878 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6879 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6880 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6881 	.enable_vblank = dm_enable_vblank,
6882 	.disable_vblank = dm_disable_vblank,
6883 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6884 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6885 	.late_register = amdgpu_dm_crtc_late_register,
6886 #endif
6887 };
6888 
6889 static enum drm_connector_status
6890 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6891 {
6892 	bool connected;
6893 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6894 
	/*
	 * Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is* called in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
	 */
6901 
6902 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6903 	    !aconnector->fake_enable)
6904 		connected = (aconnector->dc_sink != NULL);
6905 	else
6906 		connected = (aconnector->base.force == DRM_FORCE_ON);
6907 
6908 	update_subconnector_property(aconnector);
6909 
6910 	return (connected ? connector_status_connected :
6911 			connector_status_disconnected);
6912 }
6913 
6914 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6915 					    struct drm_connector_state *connector_state,
6916 					    struct drm_property *property,
6917 					    uint64_t val)
6918 {
6919 	struct drm_device *dev = connector->dev;
6920 	struct amdgpu_device *adev = drm_to_adev(dev);
6921 	struct dm_connector_state *dm_old_state =
6922 		to_dm_connector_state(connector->state);
6923 	struct dm_connector_state *dm_new_state =
6924 		to_dm_connector_state(connector_state);
6925 
6926 	int ret = -EINVAL;
6927 
6928 	if (property == dev->mode_config.scaling_mode_property) {
6929 		enum amdgpu_rmx_type rmx_type;
6930 
6931 		switch (val) {
6932 		case DRM_MODE_SCALE_CENTER:
6933 			rmx_type = RMX_CENTER;
6934 			break;
6935 		case DRM_MODE_SCALE_ASPECT:
6936 			rmx_type = RMX_ASPECT;
6937 			break;
6938 		case DRM_MODE_SCALE_FULLSCREEN:
6939 			rmx_type = RMX_FULL;
6940 			break;
6941 		case DRM_MODE_SCALE_NONE:
6942 		default:
6943 			rmx_type = RMX_OFF;
6944 			break;
6945 		}
6946 
6947 		if (dm_old_state->scaling == rmx_type)
6948 			return 0;
6949 
6950 		dm_new_state->scaling = rmx_type;
6951 		ret = 0;
6952 	} else if (property == adev->mode_info.underscan_hborder_property) {
6953 		dm_new_state->underscan_hborder = val;
6954 		ret = 0;
6955 	} else if (property == adev->mode_info.underscan_vborder_property) {
6956 		dm_new_state->underscan_vborder = val;
6957 		ret = 0;
6958 	} else if (property == adev->mode_info.underscan_property) {
6959 		dm_new_state->underscan_enable = val;
6960 		ret = 0;
6961 	} else if (property == adev->mode_info.abm_level_property) {
6962 		dm_new_state->abm_level = val;
6963 		ret = 0;
6964 	}
6965 
6966 	return ret;
6967 }
6968 
6969 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6970 					    const struct drm_connector_state *state,
6971 					    struct drm_property *property,
6972 					    uint64_t *val)
6973 {
6974 	struct drm_device *dev = connector->dev;
6975 	struct amdgpu_device *adev = drm_to_adev(dev);
6976 	struct dm_connector_state *dm_state =
6977 		to_dm_connector_state(state);
6978 	int ret = -EINVAL;
6979 
6980 	if (property == dev->mode_config.scaling_mode_property) {
6981 		switch (dm_state->scaling) {
6982 		case RMX_CENTER:
6983 			*val = DRM_MODE_SCALE_CENTER;
6984 			break;
6985 		case RMX_ASPECT:
6986 			*val = DRM_MODE_SCALE_ASPECT;
6987 			break;
6988 		case RMX_FULL:
6989 			*val = DRM_MODE_SCALE_FULLSCREEN;
6990 			break;
6991 		case RMX_OFF:
6992 		default:
6993 			*val = DRM_MODE_SCALE_NONE;
6994 			break;
6995 		}
6996 		ret = 0;
6997 	} else if (property == adev->mode_info.underscan_hborder_property) {
6998 		*val = dm_state->underscan_hborder;
6999 		ret = 0;
7000 	} else if (property == adev->mode_info.underscan_vborder_property) {
7001 		*val = dm_state->underscan_vborder;
7002 		ret = 0;
7003 	} else if (property == adev->mode_info.underscan_property) {
7004 		*val = dm_state->underscan_enable;
7005 		ret = 0;
7006 	} else if (property == adev->mode_info.abm_level_property) {
7007 		*val = dm_state->abm_level;
7008 		ret = 0;
7009 	}
7010 
7011 	return ret;
7012 }
7013 
7014 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7015 {
7016 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7017 
7018 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7019 }
7020 
7021 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
7022 {
7023 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7024 	const struct dc_link *link = aconnector->dc_link;
7025 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7026 	struct amdgpu_display_manager *dm = &adev->dm;
7027 	int i;
7028 
	/*
	 * Call this only if mst_mgr was initialized before, since it's not
	 * done for all connector types.
	 */
7033 	if (aconnector->mst_mgr.dev)
7034 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7035 
7036 	for (i = 0; i < dm->num_of_edps; i++) {
7037 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7038 			backlight_device_unregister(dm->backlight_dev[i]);
7039 			dm->backlight_dev[i] = NULL;
7040 		}
7041 	}
7042 
7043 	if (aconnector->dc_em_sink)
7044 		dc_sink_release(aconnector->dc_em_sink);
7045 	aconnector->dc_em_sink = NULL;
7046 	if (aconnector->dc_sink)
7047 		dc_sink_release(aconnector->dc_sink);
7048 	aconnector->dc_sink = NULL;
7049 
7050 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
7051 	drm_connector_unregister(connector);
7052 	drm_connector_cleanup(connector);
7053 	if (aconnector->i2c) {
7054 		i2c_del_adapter(&aconnector->i2c->base);
7055 		kfree(aconnector->i2c);
7056 	}
7057 	kfree(aconnector->dm_dp_aux.aux.name);
7058 
7059 	kfree(connector);
7060 }
7061 
7062 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7063 {
7064 	struct dm_connector_state *state =
7065 		to_dm_connector_state(connector->state);
7066 
7067 	if (connector->state)
7068 		__drm_atomic_helper_connector_destroy_state(connector->state);
7069 
7070 	kfree(state);
7071 
7072 	state = kzalloc(sizeof(*state), GFP_KERNEL);
7073 
7074 	if (state) {
7075 		state->scaling = RMX_OFF;
7076 		state->underscan_enable = false;
7077 		state->underscan_hborder = 0;
7078 		state->underscan_vborder = 0;
7079 		state->base.max_requested_bpc = 8;
7080 		state->vcpi_slots = 0;
7081 		state->pbn = 0;
7082 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7083 			state->abm_level = amdgpu_dm_abm_level;
7084 
7085 		__drm_atomic_helper_connector_reset(connector, &state->base);
7086 	}
7087 }
7088 
7089 struct drm_connector_state *
7090 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7091 {
7092 	struct dm_connector_state *state =
7093 		to_dm_connector_state(connector->state);
7094 
7095 	struct dm_connector_state *new_state =
7096 			kmemdup(state, sizeof(*state), GFP_KERNEL);
7097 
7098 	if (!new_state)
7099 		return NULL;
7100 
7101 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7102 
7103 	new_state->freesync_capable = state->freesync_capable;
7104 	new_state->abm_level = state->abm_level;
7105 	new_state->scaling = state->scaling;
7106 	new_state->underscan_enable = state->underscan_enable;
7107 	new_state->underscan_hborder = state->underscan_hborder;
7108 	new_state->underscan_vborder = state->underscan_vborder;
7109 	new_state->vcpi_slots = state->vcpi_slots;
7110 	new_state->pbn = state->pbn;
7111 	return &new_state->base;
7112 }
7113 
7114 static int
7115 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7116 {
7117 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7118 		to_amdgpu_dm_connector(connector);
7119 	int r;
7120 
7121 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7122 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7123 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7124 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7125 		if (r)
7126 			return r;
7127 	}
7128 
7129 #if defined(CONFIG_DEBUG_FS)
7130 	connector_debugfs_init(amdgpu_dm_connector);
7131 #endif
7132 
7133 	return 0;
7134 }
7135 
7136 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7137 	.reset = amdgpu_dm_connector_funcs_reset,
7138 	.detect = amdgpu_dm_connector_detect,
7139 	.fill_modes = drm_helper_probe_single_connector_modes,
7140 	.destroy = amdgpu_dm_connector_destroy,
7141 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7142 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7143 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7144 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7145 	.late_register = amdgpu_dm_connector_late_register,
7146 	.early_unregister = amdgpu_dm_connector_unregister
7147 };
7148 
7149 static int get_modes(struct drm_connector *connector)
7150 {
7151 	return amdgpu_dm_connector_get_modes(connector);
7152 }
7153 
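/*
 * Create an emulated sink from the EDID attached to the connector's
 * property blob (e.g. provided via EDID firmware or a forced connector).
 * If no EDID blob is present, the connector is forced off instead.
 */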
7154 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7155 {
7156 	struct dc_sink_init_data init_params = {
7157 			.link = aconnector->dc_link,
7158 			.sink_signal = SIGNAL_TYPE_VIRTUAL
7159 	};
7160 	struct edid *edid;
7161 
7162 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7164 				aconnector->base.name);
7165 
7166 		aconnector->base.force = DRM_FORCE_OFF;
7167 		aconnector->base.override_edid = false;
7168 		return;
7169 	}
7170 
7171 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7172 
7173 	aconnector->edid = edid;
7174 
7175 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7176 		aconnector->dc_link,
7177 		(uint8_t *)edid,
7178 		(edid->extensions + 1) * EDID_LENGTH,
7179 		&init_params);
7180 
7181 	if (aconnector->base.force == DRM_FORCE_ON) {
7182 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7183 		aconnector->dc_link->local_sink :
7184 		aconnector->dc_em_sink;
7185 		dc_sink_retain(aconnector->dc_sink);
7186 	}
7187 }
7188 
7189 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7190 {
7191 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7192 
	/*
	 * In case of a headless boot with force-on for a DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
	 */
7197 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7198 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7199 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7200 	}
7201 
7203 	aconnector->base.override_edid = true;
7204 	create_eml_sink(aconnector);
7205 }
7206 
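/*
 * Create a stream and validate it against DC, lowering the requested bpc in
 * steps of 2 (down to a minimum of 6) on validation failure. If the encoder
 * validation still fails, retry once with YCbCr 4:2:0 output forced.
 */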
7207 struct dc_stream_state *
7208 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7209 				const struct drm_display_mode *drm_mode,
7210 				const struct dm_connector_state *dm_state,
7211 				const struct dc_stream_state *old_stream)
7212 {
7213 	struct drm_connector *connector = &aconnector->base;
7214 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7215 	struct dc_stream_state *stream;
7216 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7217 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7218 	enum dc_status dc_result = DC_OK;
7219 
7220 	do {
7221 		stream = create_stream_for_sink(aconnector, drm_mode,
7222 						dm_state, old_stream,
7223 						requested_bpc);
7224 		if (stream == NULL) {
7225 			DRM_ERROR("Failed to create stream for sink!\n");
7226 			break;
7227 		}
7228 
7229 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7230 		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
7231 			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7232 
7233 		if (dc_result != DC_OK) {
7234 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7235 				      drm_mode->hdisplay,
7236 				      drm_mode->vdisplay,
7237 				      drm_mode->clock,
7238 				      dc_result,
7239 				      dc_status_to_str(dc_result));
7240 
7241 			dc_stream_release(stream);
7242 			stream = NULL;
7243 			requested_bpc -= 2; /* lower bpc to retry validation */
7244 		}
7245 
7246 	} while (stream == NULL && requested_bpc >= 6);
7247 
7248 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7249 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7250 
7251 		aconnector->force_yuv420_output = true;
7252 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7253 						dm_state, old_stream);
7254 		aconnector->force_yuv420_output = false;
7255 	}
7256 
7257 	return stream;
7258 }
7259 
7260 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7261 				   struct drm_display_mode *mode)
7262 {
7263 	int result = MODE_ERROR;
7264 	struct dc_sink *dc_sink;
7265 	/* TODO: Unhardcode stream count */
7266 	struct dc_stream_state *stream;
7267 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7268 
7269 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7270 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7271 		return result;
7272 
	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID management.
	 */
7277 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7278 		!aconnector->dc_em_sink)
7279 		handle_edid_mgmt(aconnector);
7280 
7281 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7282 
7283 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7284 				aconnector->base.force != DRM_FORCE_ON) {
7285 		DRM_ERROR("dc_sink is NULL!\n");
7286 		goto fail;
7287 	}
7288 
7289 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7290 	if (stream) {
7291 		dc_stream_release(stream);
7292 		result = MODE_OK;
7293 	}
7294 
7295 fail:
	/* TODO: error handling */
7297 	return result;
7298 }
7299 
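/*
 * Translate the connector's HDR output metadata into a DC info packet: the
 * HDMI Dynamic Range and Mastering infoframe is packed first, then wrapped
 * in either an HDMI infoframe header or a DP/eDP SDP header depending on
 * the connector type.
 */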
7300 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7301 				struct dc_info_packet *out)
7302 {
7303 	struct hdmi_drm_infoframe frame;
7304 	unsigned char buf[30]; /* 26 + 4 */
7305 	ssize_t len;
7306 	int ret, i;
7307 
7308 	memset(out, 0, sizeof(*out));
7309 
7310 	if (!state->hdr_output_metadata)
7311 		return 0;
7312 
7313 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7314 	if (ret)
7315 		return ret;
7316 
7317 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7318 	if (len < 0)
7319 		return (int)len;
7320 
7321 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7322 	if (len != 30)
7323 		return -EINVAL;
7324 
7325 	/* Prepare the infopacket for DC. */
7326 	switch (state->connector->connector_type) {
7327 	case DRM_MODE_CONNECTOR_HDMIA:
7328 		out->hb0 = 0x87; /* type */
7329 		out->hb1 = 0x01; /* version */
7330 		out->hb2 = 0x1A; /* length */
7331 		out->sb[0] = buf[3]; /* checksum */
7332 		i = 1;
7333 		break;
7334 
7335 	case DRM_MODE_CONNECTOR_DisplayPort:
7336 	case DRM_MODE_CONNECTOR_eDP:
7337 		out->hb0 = 0x00; /* sdp id, zero */
7338 		out->hb1 = 0x87; /* type */
7339 		out->hb2 = 0x1D; /* payload len - 1 */
7340 		out->hb3 = (0x13 << 2); /* sdp version */
7341 		out->sb[0] = 0x01; /* version */
7342 		out->sb[1] = 0x1A; /* length */
7343 		i = 2;
7344 		break;
7345 
7346 	default:
7347 		return -EINVAL;
7348 	}
7349 
7350 	memcpy(&out->sb[i], &buf[4], 26);
7351 	out->valid = true;
7352 
7353 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7354 		       sizeof(out->sb), false);
7355 
7356 	return 0;
7357 }
7358 
7359 static int
7360 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7361 				 struct drm_atomic_state *state)
7362 {
7363 	struct drm_connector_state *new_con_state =
7364 		drm_atomic_get_new_connector_state(state, conn);
7365 	struct drm_connector_state *old_con_state =
7366 		drm_atomic_get_old_connector_state(state, conn);
7367 	struct drm_crtc *crtc = new_con_state->crtc;
7368 	struct drm_crtc_state *new_crtc_state;
7369 	int ret;
7370 
7371 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7372 
7373 	if (!crtc)
7374 		return 0;
7375 
7376 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7377 		struct dc_info_packet hdr_infopacket;
7378 
7379 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7380 		if (ret)
7381 			return ret;
7382 
7383 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7384 		if (IS_ERR(new_crtc_state))
7385 			return PTR_ERR(new_crtc_state);
7386 
7387 		/*
7388 		 * DC considers the stream backends changed if the
7389 		 * static metadata changes. Forcing the modeset also
7390 		 * gives a simple way for userspace to switch from
7391 		 * 8bpc to 10bpc when setting the metadata to enter
7392 		 * or exit HDR.
7393 		 *
7394 		 * Changing the static metadata after it's been
7395 		 * set is permissible, however. So only force a
7396 		 * modeset if we're entering or exiting HDR.
7397 		 */
7398 		new_crtc_state->mode_changed =
7399 			!old_con_state->hdr_output_metadata ||
7400 			!new_con_state->hdr_output_metadata;
7401 	}
7402 
7403 	return 0;
7404 }
7405 
7406 static const struct drm_connector_helper_funcs
7407 amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, bigger display is hotplugged in fbcon mode, the higher
	 * resolution modes will be filtered out by drm_mode_validate_size()
	 * and will be missing after the user starts lightdm. So we need to
	 * renew the mode list in the get_modes callback, not just return the
	 * mode count.
	 */
7414 	.get_modes = get_modes,
7415 	.mode_valid = amdgpu_dm_connector_mode_valid,
7416 	.atomic_check = amdgpu_dm_connector_atomic_check,
7417 };
7418 
7419 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7420 {
7421 }
7422 
7423 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7424 {
7425 	struct drm_atomic_state *state = new_crtc_state->state;
7426 	struct drm_plane *plane;
7427 	int num_active = 0;
7428 
7429 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7430 		struct drm_plane_state *new_plane_state;
7431 
7432 		/* Cursor planes are "fake". */
7433 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7434 			continue;
7435 
7436 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7437 
7438 		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
7444 			num_active += 1;
7445 			continue;
7446 		}
7447 
7448 		/* We need a framebuffer to be considered enabled. */
7449 		num_active += (new_plane_state->fb != NULL);
7450 	}
7451 
7452 	return num_active;
7453 }
7454 
7455 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7456 					 struct drm_crtc_state *new_crtc_state)
7457 {
7458 	struct dm_crtc_state *dm_new_crtc_state =
7459 		to_dm_crtc_state(new_crtc_state);
7460 
7461 	dm_new_crtc_state->active_planes = 0;
7462 
7463 	if (!dm_new_crtc_state->stream)
7464 		return;
7465 
7466 	dm_new_crtc_state->active_planes =
7467 		count_crtc_active_planes(new_crtc_state);
7468 }
7469 
7470 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7471 				       struct drm_atomic_state *state)
7472 {
7473 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7474 									  crtc);
7475 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7476 	struct dc *dc = adev->dm.dc;
7477 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7478 	int ret = -EINVAL;
7479 
7480 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7481 
7482 	dm_update_crtc_active_planes(crtc, crtc_state);
7483 
7484 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7485 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7486 		return ret;
7487 	}
7488 
7489 	/*
7490 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7491 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7492 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7493 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7494 	 */
7495 	if (crtc_state->enable &&
7496 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7497 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7498 		return -EINVAL;
7499 	}
7500 
7501 	/* In some use cases, like reset, no stream is attached */
7502 	if (!dm_crtc_state->stream)
7503 		return 0;
7504 
7505 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7506 		return 0;
7507 
7508 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7509 	return ret;
7510 }
7511 
7512 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7513 				      const struct drm_display_mode *mode,
7514 				      struct drm_display_mode *adjusted_mode)
7515 {
7516 	return true;
7517 }
7518 
7519 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7520 	.disable = dm_crtc_helper_disable,
7521 	.atomic_check = dm_crtc_helper_atomic_check,
7522 	.mode_fixup = dm_crtc_helper_mode_fixup,
7523 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7524 };
7525 
7526 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7527 {
7528 
7529 }
7530 
7531 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7532 {
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
7549 	return 0;
7550 }
7551 
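/*
 * For MST connectors, compute the PBN from the adjusted mode clock and the
 * negotiated color depth, then reserve VCPI slots on the topology manager.
 * Non-MST connectors (no port) are a no-op here.
 */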
7552 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7553 					  struct drm_crtc_state *crtc_state,
7554 					  struct drm_connector_state *conn_state)
7555 {
7556 	struct drm_atomic_state *state = crtc_state->state;
7557 	struct drm_connector *connector = conn_state->connector;
7558 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7559 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7560 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7561 	struct drm_dp_mst_topology_mgr *mst_mgr;
7562 	struct drm_dp_mst_port *mst_port;
7563 	enum dc_color_depth color_depth;
7564 	int clock, bpp = 0;
7565 	bool is_y420 = false;
7566 
7567 	if (!aconnector->port || !aconnector->dc_sink)
7568 		return 0;
7569 
7570 	mst_port = aconnector->port;
7571 	mst_mgr = &aconnector->mst_port->mst_mgr;
7572 
7573 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7574 		return 0;
7575 
7576 	if (!state->duplicated) {
7577 		int max_bpc = conn_state->max_requested_bpc;
7578 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7579 				aconnector->force_yuv420_output;
7580 		color_depth = convert_color_depth_from_display_info(connector,
7581 								    is_y420,
7582 								    max_bpc);
7583 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7584 		clock = adjusted_mode->clock;
7585 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7586 	}
7587 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7588 									   mst_mgr,
7589 									   mst_port,
7590 									   dm_new_connector_state->pbn,
7591 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7592 	if (dm_new_connector_state->vcpi_slots < 0) {
7593 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7594 		return dm_new_connector_state->vcpi_slots;
7595 	}
7596 	return 0;
7597 }
7598 
7599 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7600 	.disable = dm_encoder_helper_disable,
7601 	.atomic_check = dm_encoder_helper_atomic_check
7602 };
7603 
7604 #if defined(CONFIG_DRM_AMD_DC_DCN)
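/*
 * Walk the new connector states and, for each MST stream, take the PBN that
 * compute_mst_dsc_configs_for_state() stored in vars, convert it into VCPI
 * slots and enable or disable DSC on the MST port accordingly.
 */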
7605 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7606 					    struct dc_state *dc_state,
7607 					    struct dsc_mst_fairness_vars *vars)
7608 {
7609 	struct dc_stream_state *stream = NULL;
7610 	struct drm_connector *connector;
7611 	struct drm_connector_state *new_con_state;
7612 	struct amdgpu_dm_connector *aconnector;
7613 	struct dm_connector_state *dm_conn_state;
7614 	int i, j;
7615 	int vcpi, pbn_div, pbn, slot_num = 0;
7616 
7617 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7618 
7619 		aconnector = to_amdgpu_dm_connector(connector);
7620 
7621 		if (!aconnector->port)
7622 			continue;
7623 
7624 		if (!new_con_state || !new_con_state->crtc)
7625 			continue;
7626 
7627 		dm_conn_state = to_dm_connector_state(new_con_state);
7628 
7629 		for (j = 0; j < dc_state->stream_count; j++) {
7630 			stream = dc_state->streams[j];
7631 			if (!stream)
7632 				continue;
7633 
7634 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7635 				break;
7636 
7637 			stream = NULL;
7638 		}
7639 
7640 		if (!stream)
7641 			continue;
7642 
7643 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
7645 		for (j = 0; j < dc_state->stream_count; j++) {
7646 			if (vars[j].aconnector == aconnector) {
7647 				pbn = vars[j].pbn;
7648 				break;
7649 			}
7650 		}
7651 
7652 		if (j == dc_state->stream_count)
7653 			continue;
7654 
7655 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7656 
7657 		if (stream->timing.flags.DSC != 1) {
7658 			dm_conn_state->pbn = pbn;
7659 			dm_conn_state->vcpi_slots = slot_num;
7660 
7661 			drm_dp_mst_atomic_enable_dsc(state,
7662 						     aconnector->port,
7663 						     dm_conn_state->pbn,
7664 						     0,
7665 						     false);
7666 			continue;
7667 		}
7668 
7669 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7670 						    aconnector->port,
7671 						    pbn, pbn_div,
7672 						    true);
7673 		if (vcpi < 0)
7674 			return vcpi;
7675 
7676 		dm_conn_state->pbn = pbn;
7677 		dm_conn_state->vcpi_slots = vcpi;
7678 	}
7679 	return 0;
7680 }
7681 #endif
7682 
7683 static void dm_drm_plane_reset(struct drm_plane *plane)
7684 {
7685 	struct dm_plane_state *amdgpu_state = NULL;
7686 
7687 	if (plane->state)
7688 		plane->funcs->atomic_destroy_state(plane, plane->state);
7689 
7690 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7691 	WARN_ON(amdgpu_state == NULL);
7692 
7693 	if (amdgpu_state)
7694 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7695 }
7696 
7697 static struct drm_plane_state *
7698 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7699 {
7700 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7701 
7702 	old_dm_plane_state = to_dm_plane_state(plane->state);
7703 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7704 	if (!dm_plane_state)
7705 		return NULL;
7706 
7707 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7708 
7709 	if (old_dm_plane_state->dc_state) {
7710 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7711 		dc_plane_state_retain(dm_plane_state->dc_state);
7712 	}
7713 
7714 	return &dm_plane_state->base;
7715 }
7716 
7717 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7718 				struct drm_plane_state *state)
7719 {
7720 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7721 
7722 	if (dm_plane_state->dc_state)
7723 		dc_plane_state_release(dm_plane_state->dc_state);
7724 
7725 	drm_atomic_helper_plane_destroy_state(plane, state);
7726 }
7727 
7728 static const struct drm_plane_funcs dm_plane_funcs = {
7729 	.update_plane	= drm_atomic_helper_update_plane,
7730 	.disable_plane	= drm_atomic_helper_disable_plane,
7731 	.destroy	= drm_primary_helper_destroy,
7732 	.reset = dm_drm_plane_reset,
7733 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7734 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7735 	.format_mod_supported = dm_plane_format_mod_supported,
7736 };
7737 
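/*
 * prepare_fb: pin the framebuffer BO in a displayable domain (VRAM for the
 * cursor, otherwise whatever amdgpu_display_supported_domains() allows),
 * bind it in GART and record its GPU address. For newly created planes the
 * dc_plane_state buffer attributes are filled here as well, since the
 * address is not yet known at atomic check time.
 */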
7738 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7739 				      struct drm_plane_state *new_state)
7740 {
7741 	struct amdgpu_framebuffer *afb;
7742 	struct drm_gem_object *obj;
7743 	struct amdgpu_device *adev;
7744 	struct amdgpu_bo *rbo;
7745 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7746 	uint32_t domain;
7747 	int r;
7748 
7749 	if (!new_state->fb) {
7750 		DRM_DEBUG_KMS("No FB bound\n");
7751 		return 0;
7752 	}
7753 
7754 	afb = to_amdgpu_framebuffer(new_state->fb);
7755 	obj = new_state->fb->obj[0];
7756 	rbo = gem_to_amdgpu_bo(obj);
7757 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7758 
7759 	r = amdgpu_bo_reserve(rbo, true);
7760 	if (r) {
7761 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7762 		return r;
7763 	}
7764 
7765 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7766 	if (r) {
7767 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7768 		goto error_unlock;
7769 	}
7770 
7771 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7772 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7773 	else
7774 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7775 
7776 	r = amdgpu_bo_pin(rbo, domain);
7777 	if (unlikely(r != 0)) {
7778 		if (r != -ERESTARTSYS)
7779 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7780 		goto error_unlock;
7781 	}
7782 
7783 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7784 	if (unlikely(r != 0)) {
7785 		DRM_ERROR("%p bind failed\n", rbo);
7786 		goto error_unpin;
7787 	}
7788 
7789 	amdgpu_bo_unreserve(rbo);
7790 
7791 	afb->address = amdgpu_bo_gpu_offset(rbo);
7792 
7793 	amdgpu_bo_ref(rbo);
7794 
	/*
7796 	 * We don't do surface updates on planes that have been newly created,
7797 	 * but we also don't have the afb->address during atomic check.
7798 	 *
7799 	 * Fill in buffer attributes depending on the address here, but only on
7800 	 * newly created planes since they're not being used by DC yet and this
7801 	 * won't modify global state.
7802 	 */
7803 	dm_plane_state_old = to_dm_plane_state(plane->state);
7804 	dm_plane_state_new = to_dm_plane_state(new_state);
7805 
7806 	if (dm_plane_state_new->dc_state &&
7807 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7808 		struct dc_plane_state *plane_state =
7809 			dm_plane_state_new->dc_state;
7810 		bool force_disable_dcc = !plane_state->dcc.enable;
7811 
7812 		fill_plane_buffer_attributes(
7813 			adev, afb, plane_state->format, plane_state->rotation,
7814 			afb->tiling_flags,
7815 			&plane_state->tiling_info, &plane_state->plane_size,
7816 			&plane_state->dcc, &plane_state->address,
7817 			afb->tmz_surface, force_disable_dcc);
7818 	}
7819 
7820 	return 0;
7821 
7822 error_unpin:
7823 	amdgpu_bo_unpin(rbo);
7824 
7825 error_unlock:
7826 	amdgpu_bo_unreserve(rbo);
7827 	return r;
7828 }
7829 
7830 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7831 				       struct drm_plane_state *old_state)
7832 {
7833 	struct amdgpu_bo *rbo;
7834 	int r;
7835 
7836 	if (!old_state->fb)
7837 		return;
7838 
7839 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7840 	r = amdgpu_bo_reserve(rbo, false);
7841 	if (unlikely(r)) {
7842 		DRM_ERROR("failed to reserve rbo before unpin\n");
7843 		return;
7844 	}
7845 
7846 	amdgpu_bo_unpin(rbo);
7847 	amdgpu_bo_unreserve(rbo);
7848 	amdgpu_bo_unref(&rbo);
7849 }
7850 
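/*
 * Validate the plane's viewport against the CRTC mode and convert the DC
 * scaling limits (1.0 == 1000) into the DRM 16.16 fixed-point min/max
 * scale factors expected by drm_atomic_helper_check_plane_state().
 */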
7851 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7852 				       struct drm_crtc_state *new_crtc_state)
7853 {
7854 	struct drm_framebuffer *fb = state->fb;
7855 	int min_downscale, max_upscale;
7856 	int min_scale = 0;
7857 	int max_scale = INT_MAX;
7858 
7859 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7860 	if (fb && state->crtc) {
7861 		/* Validate viewport to cover the case when only the position changes */
7862 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7863 			int viewport_width = state->crtc_w;
7864 			int viewport_height = state->crtc_h;
7865 
7866 			if (state->crtc_x < 0)
7867 				viewport_width += state->crtc_x;
7868 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7869 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7870 
7871 			if (state->crtc_y < 0)
7872 				viewport_height += state->crtc_y;
7873 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7874 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7875 
7876 			if (viewport_width < 0 || viewport_height < 0) {
7877 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7878 				return -EINVAL;
7879 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7880 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7881 				return -EINVAL;
7882 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7883 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7884 				return -EINVAL;
7885 			}
7886 
7887 		}
7888 
7889 		/* Get min/max allowed scaling factors from plane caps. */
7890 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7891 					     &min_downscale, &max_upscale);
7892 		/*
7893 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7894 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7895 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7896 		 */
7897 		min_scale = (1000 << 16) / max_upscale;
7898 		max_scale = (1000 << 16) / min_downscale;
7899 	}
7900 
7901 	return drm_atomic_helper_check_plane_state(
7902 		state, new_crtc_state, min_scale, max_scale, true, true);
7903 }
7904 
7905 static int dm_plane_atomic_check(struct drm_plane *plane,
7906 				 struct drm_atomic_state *state)
7907 {
7908 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7909 										 plane);
7910 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7911 	struct dc *dc = adev->dm.dc;
7912 	struct dm_plane_state *dm_plane_state;
7913 	struct dc_scaling_info scaling_info;
7914 	struct drm_crtc_state *new_crtc_state;
7915 	int ret;
7916 
7917 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7918 
7919 	dm_plane_state = to_dm_plane_state(new_plane_state);
7920 
7921 	if (!dm_plane_state->dc_state)
7922 		return 0;
7923 
7924 	new_crtc_state =
7925 		drm_atomic_get_new_crtc_state(state,
7926 					      new_plane_state->crtc);
7927 	if (!new_crtc_state)
7928 		return -EINVAL;
7929 
7930 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7931 	if (ret)
7932 		return ret;
7933 
7934 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7935 	if (ret)
7936 		return ret;
7937 
7938 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7939 		return 0;
7940 
7941 	return -EINVAL;
7942 }
7943 
7944 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7945 				       struct drm_atomic_state *state)
7946 {
7947 	/* Only support async updates on cursor planes. */
7948 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7949 		return -EINVAL;
7950 
7951 	return 0;
7952 }
7953 
7954 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7955 					 struct drm_atomic_state *state)
7956 {
7957 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7958 									   plane);
7959 	struct drm_plane_state *old_state =
7960 		drm_atomic_get_old_plane_state(state, plane);
7961 
7962 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7963 
7964 	swap(plane->state->fb, new_state->fb);
7965 
7966 	plane->state->src_x = new_state->src_x;
7967 	plane->state->src_y = new_state->src_y;
7968 	plane->state->src_w = new_state->src_w;
7969 	plane->state->src_h = new_state->src_h;
7970 	plane->state->crtc_x = new_state->crtc_x;
7971 	plane->state->crtc_y = new_state->crtc_y;
7972 	plane->state->crtc_w = new_state->crtc_w;
7973 	plane->state->crtc_h = new_state->crtc_h;
7974 
7975 	handle_cursor_update(plane, old_state);
7976 }
7977 
7978 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7979 	.prepare_fb = dm_plane_helper_prepare_fb,
7980 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7981 	.atomic_check = dm_plane_atomic_check,
7982 	.atomic_async_check = dm_plane_atomic_async_check,
7983 	.atomic_async_update = dm_plane_atomic_async_update
7984 };
7985 
7986 /*
7987  * TODO: these are currently initialized to rgb formats only.
7988  * For future use cases we should either initialize them dynamically based on
7989  * plane capabilities, or initialize this array to all formats, so internal drm
7990  * check will succeed, and let DC implement proper check
7991  */
7992 static const uint32_t rgb_formats[] = {
7993 	DRM_FORMAT_XRGB8888,
7994 	DRM_FORMAT_ARGB8888,
7995 	DRM_FORMAT_RGBA8888,
7996 	DRM_FORMAT_XRGB2101010,
7997 	DRM_FORMAT_XBGR2101010,
7998 	DRM_FORMAT_ARGB2101010,
7999 	DRM_FORMAT_ABGR2101010,
8000 	DRM_FORMAT_XRGB16161616,
8001 	DRM_FORMAT_XBGR16161616,
8002 	DRM_FORMAT_ARGB16161616,
8003 	DRM_FORMAT_ABGR16161616,
8004 	DRM_FORMAT_XBGR8888,
8005 	DRM_FORMAT_ABGR8888,
8006 	DRM_FORMAT_RGB565,
8007 };
8008 
8009 static const uint32_t overlay_formats[] = {
8010 	DRM_FORMAT_XRGB8888,
8011 	DRM_FORMAT_ARGB8888,
8012 	DRM_FORMAT_RGBA8888,
8013 	DRM_FORMAT_XBGR8888,
8014 	DRM_FORMAT_ABGR8888,
8015 	DRM_FORMAT_RGB565
8016 };
8017 
8018 static const u32 cursor_formats[] = {
8019 	DRM_FORMAT_ARGB8888
8020 };
8021 
8022 static int get_plane_formats(const struct drm_plane *plane,
8023 			     const struct dc_plane_cap *plane_cap,
8024 			     uint32_t *formats, int max_formats)
8025 {
8026 	int i, num_formats = 0;
8027 
8028 	/*
8029 	 * TODO: Query support for each group of formats directly from
8030 	 * DC plane caps. This will require adding more formats to the
8031 	 * caps list.
8032 	 */
8033 
8034 	switch (plane->type) {
8035 	case DRM_PLANE_TYPE_PRIMARY:
8036 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
8037 			if (num_formats >= max_formats)
8038 				break;
8039 
8040 			formats[num_formats++] = rgb_formats[i];
8041 		}
8042 
8043 		if (plane_cap && plane_cap->pixel_format_support.nv12)
8044 			formats[num_formats++] = DRM_FORMAT_NV12;
8045 		if (plane_cap && plane_cap->pixel_format_support.p010)
8046 			formats[num_formats++] = DRM_FORMAT_P010;
8047 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
8048 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
8049 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
8050 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
8051 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
8052 		}
8053 		break;
8054 
8055 	case DRM_PLANE_TYPE_OVERLAY:
8056 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
8057 			if (num_formats >= max_formats)
8058 				break;
8059 
8060 			formats[num_formats++] = overlay_formats[i];
8061 		}
8062 		break;
8063 
8064 	case DRM_PLANE_TYPE_CURSOR:
8065 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
8066 			if (num_formats >= max_formats)
8067 				break;
8068 
8069 			formats[num_formats++] = cursor_formats[i];
8070 		}
8071 		break;
8072 	}
8073 
8074 	return num_formats;
8075 }
8076 
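/*
 * Initialize a DRM plane for the display manager: query the supported
 * formats and modifiers for this plane type, register the plane with DRM
 * and expose the blending, color encoding/range and rotation properties
 * that the underlying DC plane caps allow.
 */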
8077 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
8078 				struct drm_plane *plane,
8079 				unsigned long possible_crtcs,
8080 				const struct dc_plane_cap *plane_cap)
8081 {
8082 	uint32_t formats[32];
8083 	int num_formats;
8084 	int res = -EPERM;
8085 	unsigned int supported_rotations;
8086 	uint64_t *modifiers = NULL;
8087 
8088 	num_formats = get_plane_formats(plane, plane_cap, formats,
8089 					ARRAY_SIZE(formats));
8090 
8091 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
8092 	if (res)
8093 		return res;
8094 
8095 	if (modifiers == NULL)
8096 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8097 
8098 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
8099 				       &dm_plane_funcs, formats, num_formats,
8100 				       modifiers, plane->type, NULL);
8101 	kfree(modifiers);
8102 	if (res)
8103 		return res;
8104 
8105 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8106 	    plane_cap && plane_cap->per_pixel_alpha) {
8107 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
8108 					  BIT(DRM_MODE_BLEND_PREMULTI) |
8109 					  BIT(DRM_MODE_BLEND_COVERAGE);
8110 
8111 		drm_plane_create_alpha_property(plane);
8112 		drm_plane_create_blend_mode_property(plane, blend_caps);
8113 	}
8114 
8115 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
8116 	    plane_cap &&
8117 	    (plane_cap->pixel_format_support.nv12 ||
8118 	     plane_cap->pixel_format_support.p010)) {
8119 		/* This only affects YUV formats. */
8120 		drm_plane_create_color_properties(
8121 			plane,
8122 			BIT(DRM_COLOR_YCBCR_BT601) |
8123 			BIT(DRM_COLOR_YCBCR_BT709) |
8124 			BIT(DRM_COLOR_YCBCR_BT2020),
8125 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8126 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8127 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8128 	}
8129 
8130 	supported_rotations =
8131 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8132 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8133 
8134 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
8135 	    plane->type != DRM_PLANE_TYPE_CURSOR)
8136 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8137 						   supported_rotations);
8138 
8139 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8140 
8141 	/* Create (reset) the plane state */
8142 	if (plane->funcs->reset)
8143 		plane->funcs->reset(plane);
8144 
8145 	return 0;
8146 }
8147 
8148 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8149 			       struct drm_plane *plane,
8150 			       uint32_t crtc_index)
8151 {
8152 	struct amdgpu_crtc *acrtc = NULL;
8153 	struct drm_plane *cursor_plane;
8154 
8155 	int res = -ENOMEM;
8156 
8157 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8158 	if (!cursor_plane)
8159 		goto fail;
8160 
8161 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
8163 
8164 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8165 	if (!acrtc)
8166 		goto fail;
8167 
8168 	res = drm_crtc_init_with_planes(
8169 			dm->ddev,
8170 			&acrtc->base,
8171 			plane,
8172 			cursor_plane,
8173 			&amdgpu_dm_crtc_funcs, NULL);
8174 
8175 	if (res)
8176 		goto fail;
8177 
8178 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8179 
	/* Create (reset) the CRTC state */
8181 	if (acrtc->base.funcs->reset)
8182 		acrtc->base.funcs->reset(&acrtc->base);
8183 
8184 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8185 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8186 
8187 	acrtc->crtc_id = crtc_index;
8188 	acrtc->base.enabled = false;
8189 	acrtc->otg_inst = -1;
8190 
8191 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8192 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8193 				   true, MAX_COLOR_LUT_ENTRIES);
8194 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8195 
8196 	return 0;
8197 
8198 fail:
8199 	kfree(acrtc);
8200 	kfree(cursor_plane);
8201 	return res;
8202 }
8203 
8204 
8205 static int to_drm_connector_type(enum signal_type st)
8206 {
8207 	switch (st) {
8208 	case SIGNAL_TYPE_HDMI_TYPE_A:
8209 		return DRM_MODE_CONNECTOR_HDMIA;
8210 	case SIGNAL_TYPE_EDP:
8211 		return DRM_MODE_CONNECTOR_eDP;
8212 	case SIGNAL_TYPE_LVDS:
8213 		return DRM_MODE_CONNECTOR_LVDS;
8214 	case SIGNAL_TYPE_RGB:
8215 		return DRM_MODE_CONNECTOR_VGA;
8216 	case SIGNAL_TYPE_DISPLAY_PORT:
8217 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8218 		return DRM_MODE_CONNECTOR_DisplayPort;
8219 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8220 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8221 		return DRM_MODE_CONNECTOR_DVID;
8222 	case SIGNAL_TYPE_VIRTUAL:
8223 		return DRM_MODE_CONNECTOR_VIRTUAL;
8224 
8225 	default:
8226 		return DRM_MODE_CONNECTOR_Unknown;
8227 	}
8228 }
8229 
8230 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8231 {
8232 	struct drm_encoder *encoder;
8233 
8234 	/* There is only one encoder per connector */
8235 	drm_connector_for_each_possible_encoder(connector, encoder)
8236 		return encoder;
8237 
8238 	return NULL;
8239 }
8240 
8241 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8242 {
8243 	struct drm_encoder *encoder;
8244 	struct amdgpu_encoder *amdgpu_encoder;
8245 
8246 	encoder = amdgpu_dm_connector_to_encoder(connector);
8247 
8248 	if (encoder == NULL)
8249 		return;
8250 
8251 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8252 
8253 	amdgpu_encoder->native_mode.clock = 0;
8254 
8255 	if (!list_empty(&connector->probed_modes)) {
8256 		struct drm_display_mode *preferred_mode = NULL;
8257 
8258 		list_for_each_entry(preferred_mode,
8259 				    &connector->probed_modes,
8260 				    head) {
8261 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8262 				amdgpu_encoder->native_mode = *preferred_mode;
8263 
8264 			break;
8265 		}
8266 
8267 	}
8268 }
8269 
8270 static struct drm_display_mode *
8271 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8272 			     char *name,
8273 			     int hdisplay, int vdisplay)
8274 {
8275 	struct drm_device *dev = encoder->dev;
8276 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8277 	struct drm_display_mode *mode = NULL;
8278 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8279 
8280 	mode = drm_mode_duplicate(dev, native_mode);
8281 
8282 	if (mode == NULL)
8283 		return NULL;
8284 
8285 	mode->hdisplay = hdisplay;
8286 	mode->vdisplay = vdisplay;
8287 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8288 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8289 
8290 	return mode;
8291 
8292 }
8293 
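/*
 * Add a set of common, smaller-than-native modes so that userspace can pick
 * a standard resolution even when the EDID only advertises the panel's
 * native timing. Modes larger than, equal to, or already present in the
 * probed list are skipped.
 */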
8294 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8295 						 struct drm_connector *connector)
8296 {
8297 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8298 	struct drm_display_mode *mode = NULL;
8299 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8300 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8301 				to_amdgpu_dm_connector(connector);
8302 	int i;
8303 	int n;
8304 	struct mode_size {
8305 		char name[DRM_DISPLAY_MODE_LEN];
8306 		int w;
8307 		int h;
8308 	} common_modes[] = {
8309 		{  "640x480",  640,  480},
8310 		{  "800x600",  800,  600},
8311 		{ "1024x768", 1024,  768},
8312 		{ "1280x720", 1280,  720},
8313 		{ "1280x800", 1280,  800},
8314 		{"1280x1024", 1280, 1024},
8315 		{ "1440x900", 1440,  900},
8316 		{"1680x1050", 1680, 1050},
8317 		{"1600x1200", 1600, 1200},
8318 		{"1920x1080", 1920, 1080},
8319 		{"1920x1200", 1920, 1200}
8320 	};
8321 
8322 	n = ARRAY_SIZE(common_modes);
8323 
8324 	for (i = 0; i < n; i++) {
8325 		struct drm_display_mode *curmode = NULL;
8326 		bool mode_existed = false;
8327 
8328 		if (common_modes[i].w > native_mode->hdisplay ||
8329 		    common_modes[i].h > native_mode->vdisplay ||
8330 		   (common_modes[i].w == native_mode->hdisplay &&
8331 		    common_modes[i].h == native_mode->vdisplay))
8332 			continue;
8333 
8334 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8335 			if (common_modes[i].w == curmode->hdisplay &&
8336 			    common_modes[i].h == curmode->vdisplay) {
8337 				mode_existed = true;
8338 				break;
8339 			}
8340 		}
8341 
8342 		if (mode_existed)
8343 			continue;
8344 
8345 		mode = amdgpu_dm_create_common_mode(encoder,
8346 				common_modes[i].name, common_modes[i].w,
8347 				common_modes[i].h);
8348 		if (!mode)
8349 			continue;
8350 
8351 		drm_mode_probed_add(connector, mode);
8352 		amdgpu_dm_connector->num_modes++;
8353 	}
8354 }
8355 
8356 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8357 {
8358 	struct drm_encoder *encoder;
8359 	struct amdgpu_encoder *amdgpu_encoder;
8360 	const struct drm_display_mode *native_mode;
8361 
8362 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8363 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8364 		return;
8365 
8366 	encoder = amdgpu_dm_connector_to_encoder(connector);
8367 	if (!encoder)
8368 		return;
8369 
8370 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8371 
8372 	native_mode = &amdgpu_encoder->native_mode;
8373 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8374 		return;
8375 
8376 	drm_connector_set_panel_orientation_with_quirk(connector,
8377 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8378 						       native_mode->hdisplay,
8379 						       native_mode->vdisplay);
8380 }
8381 
8382 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8383 					      struct edid *edid)
8384 {
8385 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8386 			to_amdgpu_dm_connector(connector);
8387 
8388 	if (edid) {
8389 		/* empty probed_modes */
8390 		INIT_LIST_HEAD(&connector->probed_modes);
8391 		amdgpu_dm_connector->num_modes =
8392 				drm_add_edid_modes(connector, edid);
8393 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since the EDID can have
		 * more than one preferred mode. Modes later in the probed
		 * mode list could be of a higher, preferred resolution.
		 * For example, a 3840x2160 preferred timing in the base
		 * EDID and a 4096x2160 preferred resolution in a later
		 * DID extension block.
		 */
8402 		drm_mode_sort(&connector->probed_modes);
8403 		amdgpu_dm_get_native_mode(connector);
8404 
8405 		/* Freesync capabilities are reset by calling
8406 		 * drm_add_edid_modes() and need to be
8407 		 * restored here.
8408 		 */
8409 		amdgpu_dm_update_freesync_caps(connector, edid);
8410 
8411 		amdgpu_set_panel_orientation(connector);
8412 	} else {
8413 		amdgpu_dm_connector->num_modes = 0;
8414 	}
8415 }
8416 
8417 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8418 			      struct drm_display_mode *mode)
8419 {
8420 	struct drm_display_mode *m;
8421 
8422 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8423 		if (drm_mode_equal(m, mode))
8424 			return true;
8425 	}
8426 
8427 	return false;
8428 }
8429 
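/*
 * Add fixed-refresh variants of the highest-refresh probed mode: each new
 * mode stretches the vertical blanking so that its refresh rate lands on
 * one of the common video frame rates inside the connector's VRR range.
 */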
8430 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8431 {
8432 	const struct drm_display_mode *m;
8433 	struct drm_display_mode *new_mode;
8434 	uint i;
8435 	uint32_t new_modes_count = 0;
8436 
	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
8449 	static const uint32_t common_rates[] = {
8450 		23976, 24000, 25000, 29970, 30000,
8451 		48000, 50000, 60000, 72000, 96000, 120000
8452 	};
8453 
8454 	/*
8455 	 * Find mode with highest refresh rate with the same resolution
8456 	 * as the preferred mode. Some monitors report a preferred mode
8457 	 * with lower resolution than the highest refresh rate supported.
8458 	 */
8459 
8460 	m = get_highest_refresh_rate_mode(aconnector, true);
8461 	if (!m)
8462 		return 0;
8463 
8464 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8465 		uint64_t target_vtotal, target_vtotal_diff;
8466 		uint64_t num, den;
8467 
8468 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8469 			continue;
8470 
8471 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8472 		    common_rates[i] > aconnector->max_vfreq * 1000)
8473 			continue;
8474 
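		/*
		 * refresh (mHz) = clock (kHz) * 10^6 / (htotal * vtotal),
		 * so the vtotal needed to hit common_rates[i] with the
		 * pixel clock and htotal unchanged is
		 * clock * 10^6 / (common_rates[i] * htotal).
		 */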
8475 		num = (unsigned long long)m->clock * 1000 * 1000;
8476 		den = common_rates[i] * (unsigned long long)m->htotal;
8477 		target_vtotal = div_u64(num, den);
8478 		target_vtotal_diff = target_vtotal - m->vtotal;
8479 
8480 		/* Check for illegal modes */
8481 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8482 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8483 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8484 			continue;
8485 
8486 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8487 		if (!new_mode)
8488 			goto out;
8489 
8490 		new_mode->vtotal += (u16)target_vtotal_diff;
8491 		new_mode->vsync_start += (u16)target_vtotal_diff;
8492 		new_mode->vsync_end += (u16)target_vtotal_diff;
8493 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8494 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8495 
8496 		if (!is_duplicate_mode(aconnector, new_mode)) {
8497 			drm_mode_probed_add(&aconnector->base, new_mode);
8498 			new_modes_count += 1;
		} else {
			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
8501 	}
8502  out:
8503 	return new_modes_count;
8504 }
8505 
8506 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8507 						   struct edid *edid)
8508 {
8509 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8510 		to_amdgpu_dm_connector(connector);
8511 
8512 	if (!edid)
8513 		return;
8514 
8515 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8516 		amdgpu_dm_connector->num_modes +=
8517 			add_fs_modes(amdgpu_dm_connector);
8518 }
8519 
8520 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8521 {
8522 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8523 			to_amdgpu_dm_connector(connector);
8524 	struct drm_encoder *encoder;
8525 	struct edid *edid = amdgpu_dm_connector->edid;
8526 
8527 	encoder = amdgpu_dm_connector_to_encoder(connector);
8528 
8529 	if (!drm_edid_is_valid(edid)) {
8530 		amdgpu_dm_connector->num_modes =
8531 				drm_add_modes_noedid(connector, 640, 480);
8532 	} else {
8533 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8534 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8535 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8536 	}
8537 	amdgpu_dm_fbc_init(connector);
8538 
8539 	return amdgpu_dm_connector->num_modes;
8540 }
8541 
8542 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8543 				     struct amdgpu_dm_connector *aconnector,
8544 				     int connector_type,
8545 				     struct dc_link *link,
8546 				     int link_index)
8547 {
8548 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8549 
8550 	/*
8551 	 * Some of the properties below require access to state, like bpc.
8552 	 * Allocate some default initial connector state with our reset helper.
8553 	 */
8554 	if (aconnector->base.funcs->reset)
8555 		aconnector->base.funcs->reset(&aconnector->base);
8556 
8557 	aconnector->connector_id = link_index;
8558 	aconnector->dc_link = link;
8559 	aconnector->base.interlace_allowed = false;
8560 	aconnector->base.doublescan_allowed = false;
8561 	aconnector->base.stereo_allowed = false;
8562 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8563 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8564 	aconnector->audio_inst = -1;
8565 	mutex_init(&aconnector->hpd_lock);
8566 
	/*
	 * Configure HPD hot plug support. The connector->polled default
	 * value of 0 means HPD hot plug is not supported.
	 */
8571 	switch (connector_type) {
8572 	case DRM_MODE_CONNECTOR_HDMIA:
8573 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8574 		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
8576 		break;
8577 	case DRM_MODE_CONNECTOR_DisplayPort:
8578 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8579 		link->link_enc = link_enc_cfg_get_link_enc(link);
8580 		ASSERT(link->link_enc);
8581 		if (link->link_enc)
8582 			aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
8584 		break;
8585 	case DRM_MODE_CONNECTOR_DVID:
8586 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8587 		break;
8588 	default:
8589 		break;
8590 	}
8591 
8592 	drm_object_attach_property(&aconnector->base.base,
8593 				dm->ddev->mode_config.scaling_mode_property,
8594 				DRM_MODE_SCALE_NONE);
8595 
8596 	drm_object_attach_property(&aconnector->base.base,
8597 				adev->mode_info.underscan_property,
8598 				UNDERSCAN_OFF);
8599 	drm_object_attach_property(&aconnector->base.base,
8600 				adev->mode_info.underscan_hborder_property,
8601 				0);
8602 	drm_object_attach_property(&aconnector->base.base,
8603 				adev->mode_info.underscan_vborder_property,
8604 				0);
8605 
8606 	if (!aconnector->mst_port)
8607 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8608 
8609 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8610 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8611 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8612 
8613 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8614 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8615 		drm_object_attach_property(&aconnector->base.base,
8616 				adev->mode_info.abm_level_property, 0);
8617 	}
8618 
8619 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8620 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8621 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8622 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8623 
8624 		if (!aconnector->mst_port)
8625 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8626 
8627 #ifdef CONFIG_DRM_AMD_DC_HDCP
8628 		if (adev->dm.hdcp_workqueue)
8629 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8630 #endif
8631 	}
8632 }
8633 
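/*
 * i2c transfer handler: translate the Linux i2c_msg array into a DC
 * i2c_command and submit it over the link's DDC channel. Returns the
 * number of messages on success or a negative error code on failure.
 */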
8634 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8635 			      struct i2c_msg *msgs, int num)
8636 {
8637 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8638 	struct ddc_service *ddc_service = i2c->ddc_service;
8639 	struct i2c_command cmd;
8640 	int i;
8641 	int result = -EIO;
8642 
8643 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8644 
8645 	if (!cmd.payloads)
8646 		return result;
8647 
8648 	cmd.number_of_payloads = num;
8649 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8650 	cmd.speed = 100;
8651 
8652 	for (i = 0; i < num; i++) {
8653 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8654 		cmd.payloads[i].address = msgs[i].addr;
8655 		cmd.payloads[i].length = msgs[i].len;
8656 		cmd.payloads[i].data = msgs[i].buf;
8657 	}
8658 
8659 	if (dc_submit_i2c(
8660 			ddc_service->ctx->dc,
8661 			ddc_service->link->link_index,
8662 			&cmd))
8663 		result = num;
8664 
8665 	kfree(cmd.payloads);
8666 	return result;
8667 }
8668 
8669 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8670 {
8671 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8672 }
8673 
8674 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8675 	.master_xfer = amdgpu_dm_i2c_xfer,
8676 	.functionality = amdgpu_dm_i2c_func,
8677 };
8678 
8679 static struct amdgpu_i2c_adapter *
8680 create_i2c(struct ddc_service *ddc_service,
8681 	   int link_index,
8682 	   int *res)
8683 {
8684 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8685 	struct amdgpu_i2c_adapter *i2c;
8686 
8687 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8688 	if (!i2c)
8689 		return NULL;
8690 	i2c->base.owner = THIS_MODULE;
8691 	i2c->base.class = I2C_CLASS_DDC;
8692 	i2c->base.dev.parent = &adev->pdev->dev;
8693 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8694 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8695 	i2c_set_adapdata(&i2c->base, i2c);
8696 	i2c->ddc_service = ddc_service;
8697 
8698 	return i2c;
8699 }
8700 
8701 
8702 /*
8703  * Note: this function assumes that dc_link_detect() was called for the
8704  * dc_link which will be represented by this aconnector.
8705  */
8706 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8707 				    struct amdgpu_dm_connector *aconnector,
8708 				    uint32_t link_index,
8709 				    struct amdgpu_encoder *aencoder)
8710 {
8711 	int res = 0;
8712 	int connector_type;
8713 	struct dc *dc = dm->dc;
8714 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8715 	struct amdgpu_i2c_adapter *i2c;
8716 
8717 	link->priv = aconnector;
8718 
8719 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8720 
8721 	i2c = create_i2c(link->ddc, link->link_index, &res);
8722 	if (!i2c) {
8723 		DRM_ERROR("Failed to create i2c adapter data\n");
8724 		return -ENOMEM;
8725 	}
8726 
8727 	aconnector->i2c = i2c;
8728 	res = i2c_add_adapter(&i2c->base);
8729 
8730 	if (res) {
8731 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8732 		goto out_free;
8733 	}
8734 
8735 	connector_type = to_drm_connector_type(link->connector_signal);
8736 
8737 	res = drm_connector_init_with_ddc(
8738 			dm->ddev,
8739 			&aconnector->base,
8740 			&amdgpu_dm_connector_funcs,
8741 			connector_type,
8742 			&i2c->base);
8743 
8744 	if (res) {
8745 		DRM_ERROR("connector_init failed\n");
8746 		aconnector->connector_id = -1;
8747 		goto out_free;
8748 	}
8749 
8750 	drm_connector_helper_add(
8751 			&aconnector->base,
8752 			&amdgpu_dm_connector_helper_funcs);
8753 
8754 	amdgpu_dm_connector_init_helper(
8755 		dm,
8756 		aconnector,
8757 		connector_type,
8758 		link,
8759 		link_index);
8760 
8761 	drm_connector_attach_encoder(
8762 		&aconnector->base, &aencoder->base);
8763 
8764 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8765 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8766 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8767 
8768 out_free:
8769 	if (res) {
8770 		kfree(i2c);
8771 		aconnector->i2c = NULL;
8772 	}
8773 	return res;
8774 }
8775 
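/*
 * Build the possible_crtcs bitmask for an encoder: one bit per CRTC,
 * capped at six CRTCs.
 */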
8776 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8777 {
8778 	switch (adev->mode_info.num_crtc) {
8779 	case 1:
8780 		return 0x1;
8781 	case 2:
8782 		return 0x3;
8783 	case 3:
8784 		return 0x7;
8785 	case 4:
8786 		return 0xf;
8787 	case 5:
8788 		return 0x1f;
8789 	case 6:
8790 	default:
8791 		return 0x3f;
8792 	}
8793 }
8794 
8795 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8796 				  struct amdgpu_encoder *aencoder,
8797 				  uint32_t link_index)
8798 {
8799 	struct amdgpu_device *adev = drm_to_adev(dev);
8800 
8801 	int res = drm_encoder_init(dev,
8802 				   &aencoder->base,
8803 				   &amdgpu_dm_encoder_funcs,
8804 				   DRM_MODE_ENCODER_TMDS,
8805 				   NULL);
8806 
8807 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8808 
8809 	if (!res)
8810 		aencoder->encoder_id = link_index;
8811 	else
8812 		aencoder->encoder_id = -1;
8813 
8814 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8815 
8816 	return res;
8817 }
8818 
8819 static void manage_dm_interrupts(struct amdgpu_device *adev,
8820 				 struct amdgpu_crtc *acrtc,
8821 				 bool enable)
8822 {
8823 	/*
8824 	 * We have no guarantee that the frontend index maps to the same
8825 	 * backend index - some even map to more than one.
8826 	 *
8827 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8828 	 */
8829 	int irq_type =
8830 		amdgpu_display_crtc_idx_to_irq_type(
8831 			adev,
8832 			acrtc->crtc_id);
8833 
8834 	if (enable) {
8835 		drm_crtc_vblank_on(&acrtc->base);
8836 		amdgpu_irq_get(
8837 			adev,
8838 			&adev->pageflip_irq,
8839 			irq_type);
8840 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8841 		amdgpu_irq_get(
8842 			adev,
8843 			&adev->vline0_irq,
8844 			irq_type);
8845 #endif
8846 	} else {
8847 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8848 		amdgpu_irq_put(
8849 			adev,
8850 			&adev->vline0_irq,
8851 			irq_type);
8852 #endif
8853 		amdgpu_irq_put(
8854 			adev,
8855 			&adev->pageflip_irq,
8856 			irq_type);
8857 		drm_crtc_vblank_off(&acrtc->base);
8858 	}
8859 }
8860 
8861 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8862 				      struct amdgpu_crtc *acrtc)
8863 {
8864 	int irq_type =
8865 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8866 
	/*
	 * This reads the current state for the IRQ and forces the setting
	 * to be reapplied to hardware.
	 */
8871 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8872 }
8873 
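/*
 * Check whether the scaling mode or the underscan configuration changed
 * between the old and the new connector state.
 */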
8874 static bool
8875 is_scaling_state_different(const struct dm_connector_state *dm_state,
8876 			   const struct dm_connector_state *old_dm_state)
8877 {
8878 	if (dm_state->scaling != old_dm_state->scaling)
8879 		return true;
8880 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8881 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8882 			return true;
8883 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8884 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8885 			return true;
8886 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8887 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8888 		return true;
8889 	return false;
8890 }
8891 
8892 #ifdef CONFIG_DRM_AMD_DC_HDCP
8893 static bool is_content_protection_different(struct drm_connector_state *state,
8894 					    const struct drm_connector_state *old_state,
8895 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8896 {
8897 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8898 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8899 
8900 	/* Handle: Type0/1 change */
8901 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8902 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8903 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8904 		return true;
8905 	}
8906 
	/* CP is being re-enabled; ignore this.
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
8911 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8912 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8913 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8914 		return false;
8915 	}
8916 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and
	 * the restored state will be ENABLED.
8918 	 *
8919 	 * Handles:	UNDESIRED -> ENABLED
8920 	 */
8921 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8922 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8923 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8924 
8925 	/* Stream removed and re-enabled
8926 	 *
8927 	 * Can sometimes overlap with the HPD case,
8928 	 * thus set update_hdcp to false to avoid
8929 	 * setting HDCP multiple times.
8930 	 *
8931 	 * Handles:	DESIRED -> DESIRED (Special case)
8932 	 */
8933 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8934 		state->crtc && state->crtc->enabled &&
8935 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8936 		dm_con_state->update_hdcp = false;
8937 		return true;
8938 	}
8939 
8940 	/* Hot-plug, headless s3, dpms
8941 	 *
8942 	 * Only start HDCP if the display is connected/enabled.
8943 	 * update_hdcp flag will be set to false until the next
8944 	 * HPD comes in.
8945 	 *
8946 	 * Handles:	DESIRED -> DESIRED (Special case)
8947 	 */
8948 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8949 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8950 		dm_con_state->update_hdcp = false;
8951 		return true;
8952 	}
8953 
8954 	/*
8955 	 * Handles:	UNDESIRED -> UNDESIRED
8956 	 *		DESIRED -> DESIRED
8957 	 *		ENABLED -> ENABLED
8958 	 */
8959 	if (old_state->content_protection == state->content_protection)
8960 		return false;
8961 
8962 	/*
8963 	 * Handles:	UNDESIRED -> DESIRED
8964 	 *		DESIRED -> UNDESIRED
8965 	 *		ENABLED -> UNDESIRED
8966 	 */
8967 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8968 		return true;
8969 
8970 	/*
8971 	 * Handles:	DESIRED -> ENABLED
8972 	 */
8973 	return false;
8974 }
8975 
8976 #endif
8977 static void remove_stream(struct amdgpu_device *adev,
8978 			  struct amdgpu_crtc *acrtc,
8979 			  struct dc_stream_state *stream)
8980 {
8981 	/* this is the update mode case */
8982 
8983 	acrtc->otg_inst = -1;
8984 	acrtc->enabled = false;
8985 }
8986 
8987 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8988 			       struct dc_cursor_position *position)
8989 {
8990 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8991 	int x, y;
8992 	int xorigin = 0, yorigin = 0;
8993 
8994 	if (!crtc || !plane->state->fb)
8995 		return 0;
8996 
8997 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8998 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8999 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
9000 			  __func__,
9001 			  plane->state->crtc_w,
9002 			  plane->state->crtc_h);
9003 		return -EINVAL;
9004 	}
9005 
9006 	x = plane->state->crtc_x;
9007 	y = plane->state->crtc_y;
9008 
9009 	if (x <= -amdgpu_crtc->max_cursor_width ||
9010 	    y <= -amdgpu_crtc->max_cursor_height)
9011 		return 0;
9012 
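	/*
	 * The hardware cursor position cannot be negative; when the cursor
	 * hangs off the top or left edge, clamp the position to 0 and move
	 * the hotspot by the clipped amount instead.
	 */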
9013 	if (x < 0) {
9014 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
9015 		x = 0;
9016 	}
9017 	if (y < 0) {
9018 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
9019 		y = 0;
9020 	}
9021 	position->enable = true;
9022 	position->translate_by_source = true;
9023 	position->x = x;
9024 	position->y = y;
9025 	position->x_hotspot = xorigin;
9026 	position->y_hotspot = yorigin;
9027 
9028 	return 0;
9029 }
9030 
9031 static void handle_cursor_update(struct drm_plane *plane,
9032 				 struct drm_plane_state *old_plane_state)
9033 {
9034 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
9035 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9036 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9037 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9038 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9039 	uint64_t address = afb ? afb->address : 0;
9040 	struct dc_cursor_position position = {0};
9041 	struct dc_cursor_attributes attributes;
9042 	int ret;
9043 
9044 	if (!plane->state->fb && !old_plane_state->fb)
9045 		return;
9046 
	DC_LOG_CURSOR("%s: crtc_id=%d with size %dx%d\n",
9048 		      __func__,
9049 		      amdgpu_crtc->crtc_id,
9050 		      plane->state->crtc_w,
9051 		      plane->state->crtc_h);
9052 
9053 	ret = get_cursor_position(plane, crtc, &position);
9054 	if (ret)
9055 		return;
9056 
9057 	if (!position.enable) {
9058 		/* turn off cursor */
9059 		if (crtc_state && crtc_state->stream) {
9060 			mutex_lock(&adev->dm.dc_lock);
9061 			dc_stream_set_cursor_position(crtc_state->stream,
9062 						      &position);
9063 			mutex_unlock(&adev->dm.dc_lock);
9064 		}
9065 		return;
9066 	}
9067 
9068 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
9069 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
9070 
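	/*
	 * Program the cursor surface attributes: address, dimensions,
	 * pre-multiplied ARGB format, and a pitch derived from the
	 * framebuffer.
	 */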
9071 	memset(&attributes, 0, sizeof(attributes));
9072 	attributes.address.high_part = upper_32_bits(address);
9073 	attributes.address.low_part  = lower_32_bits(address);
9074 	attributes.width             = plane->state->crtc_w;
9075 	attributes.height            = plane->state->crtc_h;
9076 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9077 	attributes.rotation_angle    = 0;
9078 	attributes.attribute_flags.value = 0;
9079 
9080 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
9081 
9082 	if (crtc_state->stream) {
9083 		mutex_lock(&adev->dm.dc_lock);
9084 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9085 							 &attributes))
9086 			DRM_ERROR("DC failed to set cursor attributes\n");
9087 
9088 		if (!dc_stream_set_cursor_position(crtc_state->stream,
9089 						   &position))
9090 			DRM_ERROR("DC failed to set cursor position\n");
9091 		mutex_unlock(&adev->dm.dc_lock);
9092 	}
9093 }
9094 
9095 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9096 {
9098 	assert_spin_locked(&acrtc->base.dev->event_lock);
9099 	WARN_ON(acrtc->event);
9100 
9101 	acrtc->event = acrtc->base.state->event;
9102 
9103 	/* Set the flip status */
9104 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9105 
9106 	/* Mark this event as consumed */
9107 	acrtc->base.state->event = NULL;
9108 
9109 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9110 		     acrtc->crtc_id);
9111 }
9112 
9113 static void update_freesync_state_on_stream(
9114 	struct amdgpu_display_manager *dm,
9115 	struct dm_crtc_state *new_crtc_state,
9116 	struct dc_stream_state *new_stream,
9117 	struct dc_plane_state *surface,
9118 	u32 flip_timestamp_in_us)
9119 {
9120 	struct mod_vrr_params vrr_params;
9121 	struct dc_info_packet vrr_infopacket = {0};
9122 	struct amdgpu_device *adev = dm->adev;
9123 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9124 	unsigned long flags;
9125 	bool pack_sdp_v1_3 = false;
9126 
9127 	if (!new_stream)
9128 		return;
9129 
9130 	/*
9131 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9132 	 * For now it's sufficient to just guard against these conditions.
9133 	 */
9134 
9135 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9136 		return;
9137 
9138 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
9140 
9141 	if (surface) {
9142 		mod_freesync_handle_preflip(
9143 			dm->freesync_module,
9144 			surface,
9145 			new_stream,
9146 			flip_timestamp_in_us,
9147 			&vrr_params);
9148 
9149 		if (adev->family < AMDGPU_FAMILY_AI &&
9150 		    amdgpu_dm_vrr_active(new_crtc_state)) {
9151 			mod_freesync_handle_v_update(dm->freesync_module,
9152 						     new_stream, &vrr_params);
9153 
9154 			/* Need to call this before the frame ends. */
9155 			dc_stream_adjust_vmin_vmax(dm->dc,
9156 						   new_crtc_state->stream,
9157 						   &vrr_params.adjust);
9158 		}
9159 	}
9160 
9161 	mod_freesync_build_vrr_infopacket(
9162 		dm->freesync_module,
9163 		new_stream,
9164 		&vrr_params,
9165 		PACKET_TYPE_VRR,
9166 		TRANSFER_FUNC_UNKNOWN,
9167 		&vrr_infopacket,
9168 		pack_sdp_v1_3);
9169 
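	/*
	 * Note whether the timing adjustment or the VRR infopacket differ
	 * from what was previously programmed; these flags are checked
	 * later in the commit path.
	 */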
9170 	new_crtc_state->freesync_timing_changed |=
9171 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9172 			&vrr_params.adjust,
9173 			sizeof(vrr_params.adjust)) != 0);
9174 
9175 	new_crtc_state->freesync_vrr_info_changed |=
9176 		(memcmp(&new_crtc_state->vrr_infopacket,
9177 			&vrr_infopacket,
9178 			sizeof(vrr_infopacket)) != 0);
9179 
9180 	acrtc->dm_irq_params.vrr_params = vrr_params;
9181 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9182 
9183 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9184 	new_stream->vrr_infopacket = vrr_infopacket;
9185 
9186 	if (new_crtc_state->freesync_vrr_info_changed)
9187 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9188 			      new_crtc_state->base.crtc->base.id,
9189 			      (int)new_crtc_state->base.vrr_enabled,
9190 			      (int)vrr_params.state);
9191 
9192 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9193 }
9194 
9195 static void update_stream_irq_parameters(
9196 	struct amdgpu_display_manager *dm,
9197 	struct dm_crtc_state *new_crtc_state)
9198 {
9199 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9200 	struct mod_vrr_params vrr_params;
9201 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9202 	struct amdgpu_device *adev = dm->adev;
9203 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9204 	unsigned long flags;
9205 
9206 	if (!new_stream)
9207 		return;
9208 
9209 	/*
9210 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9211 	 * For now it's sufficient to just guard against these conditions.
9212 	 */
9213 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9214 		return;
9215 
9216 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9217 	vrr_params = acrtc->dm_irq_params.vrr_params;
9218 
9219 	if (new_crtc_state->vrr_supported &&
9220 	    config.min_refresh_in_uhz &&
9221 	    config.max_refresh_in_uhz) {
9222 		/*
9223 		 * if freesync compatible mode was set, config.state will be set
9224 		 * in atomic check
9225 		 */
9226 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9227 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9228 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9229 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9230 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9231 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9232 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9233 		} else {
9234 			config.state = new_crtc_state->base.vrr_enabled ?
9235 						     VRR_STATE_ACTIVE_VARIABLE :
9236 						     VRR_STATE_INACTIVE;
9237 		}
9238 	} else {
9239 		config.state = VRR_STATE_UNSUPPORTED;
9240 	}
9241 
9242 	mod_freesync_build_vrr_params(dm->freesync_module,
9243 				      new_stream,
9244 				      &config, &vrr_params);
9245 
9246 	new_crtc_state->freesync_timing_changed |=
9247 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9248 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9249 
9250 	new_crtc_state->freesync_config = config;
9251 	/* Copy state for access from DM IRQ handler */
9252 	acrtc->dm_irq_params.freesync_config = config;
9253 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9254 	acrtc->dm_irq_params.vrr_params = vrr_params;
9255 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9256 }
9257 
9258 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9259 					    struct dm_crtc_state *new_state)
9260 {
9261 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9262 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9263 
9264 	if (!old_vrr_active && new_vrr_active) {
9265 		/* Transition VRR inactive -> active:
9266 		 * While VRR is active, we must not disable vblank irq, as a
9267 		 * reenable after disable would compute bogus vblank/pflip
9268 		 * timestamps if it likely happened inside display front-porch.
9269 		 *
9270 		 * We also need vupdate irq for the actual core vblank handling
9271 		 * at end of vblank.
9272 		 */
9273 		dm_set_vupdate_irq(new_state->base.crtc, true);
9274 		drm_crtc_vblank_get(new_state->base.crtc);
9275 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9276 				 __func__, new_state->base.crtc->base.id);
9277 	} else if (old_vrr_active && !new_vrr_active) {
9278 		/* Transition VRR active -> inactive:
9279 		 * Allow vblank irq disable again for fixed refresh rate.
9280 		 */
9281 		dm_set_vupdate_irq(new_state->base.crtc, false);
9282 		drm_crtc_vblank_put(new_state->base.crtc);
9283 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9284 				 __func__, new_state->base.crtc->base.id);
9285 	}
9286 }
9287 
9288 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9289 {
9290 	struct drm_plane *plane;
9291 	struct drm_plane_state *old_plane_state;
9292 	int i;
9293 
9294 	/*
9295 	 * TODO: Make this per-stream so we don't issue redundant updates for
9296 	 * commits with multiple streams.
9297 	 */
9298 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9299 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9300 			handle_cursor_update(plane, old_plane_state);
9301 }
9302 
9303 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9304 				    struct dc_state *dc_state,
9305 				    struct drm_device *dev,
9306 				    struct amdgpu_display_manager *dm,
9307 				    struct drm_crtc *pcrtc,
9308 				    bool wait_for_vblank)
9309 {
9310 	uint32_t i;
9311 	uint64_t timestamp_ns;
9312 	struct drm_plane *plane;
9313 	struct drm_plane_state *old_plane_state, *new_plane_state;
9314 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9315 	struct drm_crtc_state *new_pcrtc_state =
9316 			drm_atomic_get_new_crtc_state(state, pcrtc);
9317 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9318 	struct dm_crtc_state *dm_old_crtc_state =
9319 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9320 	int planes_count = 0, vpos, hpos;
9321 	long r;
9322 	unsigned long flags;
9323 	struct amdgpu_bo *abo;
9324 	uint32_t target_vblank, last_flip_vblank;
9325 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9326 	bool pflip_present = false;
9327 	struct {
9328 		struct dc_surface_update surface_updates[MAX_SURFACES];
9329 		struct dc_plane_info plane_infos[MAX_SURFACES];
9330 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9331 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9332 		struct dc_stream_update stream_update;
9333 	} *bundle;
9334 
9335 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9336 
9337 	if (!bundle) {
9338 		dm_error("Failed to allocate update bundle\n");
9339 		goto cleanup;
9340 	}
9341 
9342 	/*
9343 	 * Disable the cursor first if we're disabling all the planes.
9344 	 * It'll remain on the screen after the planes are re-enabled
9345 	 * if we don't.
9346 	 */
9347 	if (acrtc_state->active_planes == 0)
9348 		amdgpu_dm_commit_cursors(state);
9349 
9350 	/* update planes when needed */
9351 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9352 		struct drm_crtc *crtc = new_plane_state->crtc;
9353 		struct drm_crtc_state *new_crtc_state;
9354 		struct drm_framebuffer *fb = new_plane_state->fb;
9355 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9356 		bool plane_needs_flip;
9357 		struct dc_plane_state *dc_plane;
9358 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9359 
9360 		/* Cursor plane is handled after stream updates */
9361 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9362 			continue;
9363 
9364 		if (!fb || !crtc || pcrtc != crtc)
9365 			continue;
9366 
9367 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9368 		if (!new_crtc_state->active)
9369 			continue;
9370 
9371 		dc_plane = dm_new_plane_state->dc_state;
9372 
9373 		bundle->surface_updates[planes_count].surface = dc_plane;
9374 		if (new_pcrtc_state->color_mgmt_changed) {
9375 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9376 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9377 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9378 		}
9379 
9380 		fill_dc_scaling_info(dm->adev, new_plane_state,
9381 				     &bundle->scaling_infos[planes_count]);
9382 
9383 		bundle->surface_updates[planes_count].scaling_info =
9384 			&bundle->scaling_infos[planes_count];
9385 
9386 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9387 
9388 		pflip_present = pflip_present || plane_needs_flip;
9389 
9390 		if (!plane_needs_flip) {
9391 			planes_count += 1;
9392 			continue;
9393 		}
9394 
9395 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9396 
9397 		/*
9398 		 * Wait for all fences on this FB. Do limited wait to avoid
9399 		 * deadlock during GPU reset when this fence will not signal
9400 		 * but we hold reservation lock for the BO.
9401 		 */
9402 		r = dma_resv_wait_timeout(abo->tbo.base.resv,
9403 					  DMA_RESV_USAGE_WRITE, false,
9404 					  msecs_to_jiffies(5000));
9405 		if (unlikely(r <= 0))
9406 			DRM_ERROR("Waiting for fences timed out!");
9407 
9408 		fill_dc_plane_info_and_addr(
9409 			dm->adev, new_plane_state,
9410 			afb->tiling_flags,
9411 			&bundle->plane_infos[planes_count],
9412 			&bundle->flip_addrs[planes_count].address,
9413 			afb->tmz_surface, false);
9414 
9415 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9416 				 new_plane_state->plane->index,
9417 				 bundle->plane_infos[planes_count].dcc.enable);
9418 
9419 		bundle->surface_updates[planes_count].plane_info =
9420 			&bundle->plane_infos[planes_count];
9421 
9422 		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
9423 				    new_crtc_state,
9424 				    &bundle->flip_addrs[planes_count]);
9425 
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
9430 		bundle->flip_addrs[planes_count].flip_immediate =
9431 			crtc->state->async_flip &&
9432 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9433 
9434 		timestamp_ns = ktime_get_ns();
9435 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9436 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9437 		bundle->surface_updates[planes_count].surface = dc_plane;
9438 
9439 		if (!bundle->surface_updates[planes_count].surface) {
9440 			DRM_ERROR("No surface for CRTC: id=%d\n",
9441 					acrtc_attach->crtc_id);
9442 			continue;
9443 		}
9444 
9445 		if (plane == pcrtc->primary)
9446 			update_freesync_state_on_stream(
9447 				dm,
9448 				acrtc_state,
9449 				acrtc_state->stream,
9450 				dc_plane,
9451 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9452 
9453 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9454 				 __func__,
9455 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9456 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9457 
9458 		planes_count += 1;
9459 
9460 	}
9461 
9462 	if (pflip_present) {
9463 		if (!vrr_active) {
9464 			/* Use old throttling in non-vrr fixed refresh rate mode
9465 			 * to keep flip scheduling based on target vblank counts
9466 			 * working in a backwards compatible way, e.g., for
9467 			 * clients using the GLX_OML_sync_control extension or
9468 			 * DRI3/Present extension with defined target_msc.
9469 			 */
9470 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
9473 			/* For variable refresh rate mode only:
9474 			 * Get vblank of last completed flip to avoid > 1 vrr
9475 			 * flips per video frame by use of throttling, but allow
9476 			 * flip programming anywhere in the possibly large
9477 			 * variable vrr vblank interval for fine-grained flip
9478 			 * timing control and more opportunity to avoid stutter
9479 			 * on late submission of flips.
9480 			 */
9481 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9482 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9483 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9484 		}
9485 
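		/*
		 * wait_for_vblank is a bool, so the target is either the
		 * vblank of the last flip or the one after it.
		 */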
9486 		target_vblank = last_flip_vblank + wait_for_vblank;
9487 
9488 		/*
9489 		 * Wait until we're out of the vertical blank period before the one
9490 		 * targeted by the flip
9491 		 */
9492 		while ((acrtc_attach->enabled &&
9493 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9494 							    0, &vpos, &hpos, NULL,
9495 							    NULL, &pcrtc->hwmode)
9496 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9497 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9498 			(int)(target_vblank -
9499 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9500 			usleep_range(1000, 1100);
9501 		}
9502 
		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
9511 		if (acrtc_attach->base.state->event &&
9512 		    acrtc_state->active_planes > 0) {
9513 			drm_crtc_vblank_get(pcrtc);
9514 
9515 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9516 
9517 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9518 			prepare_flip_isr(acrtc_attach);
9519 
9520 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9521 		}
9522 
9523 		if (acrtc_state->stream) {
9524 			if (acrtc_state->freesync_vrr_info_changed)
9525 				bundle->stream_update.vrr_infopacket =
9526 					&acrtc_state->stream->vrr_infopacket;
9527 		}
9528 	}
9529 
9530 	/* Update the planes if changed or disable if we don't have any. */
9531 	if ((planes_count || acrtc_state->active_planes == 0) &&
9532 		acrtc_state->stream) {
9533 		/*
9534 		 * If PSR or idle optimizations are enabled then flush out
9535 		 * any pending work before hardware programming.
9536 		 */
9537 		if (dm->vblank_control_workqueue)
9538 			flush_workqueue(dm->vblank_control_workqueue);
9539 
9540 		bundle->stream_update.stream = acrtc_state->stream;
9541 		if (new_pcrtc_state->mode_changed) {
9542 			bundle->stream_update.src = acrtc_state->stream->src;
9543 			bundle->stream_update.dst = acrtc_state->stream->dst;
9544 		}
9545 
9546 		if (new_pcrtc_state->color_mgmt_changed) {
9547 			/*
9548 			 * TODO: This isn't fully correct since we've actually
9549 			 * already modified the stream in place.
9550 			 */
9551 			bundle->stream_update.gamut_remap =
9552 				&acrtc_state->stream->gamut_remap_matrix;
9553 			bundle->stream_update.output_csc_transform =
9554 				&acrtc_state->stream->csc_color_matrix;
9555 			bundle->stream_update.out_transfer_func =
9556 				acrtc_state->stream->out_transfer_func;
9557 		}
9558 
9559 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9560 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9561 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9562 
9563 		/*
9564 		 * If FreeSync state on the stream has changed then we need to
9565 		 * re-adjust the min/max bounds now that DC doesn't handle this
9566 		 * as part of commit.
9567 		 */
9568 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9569 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9570 			dc_stream_adjust_vmin_vmax(
9571 				dm->dc, acrtc_state->stream,
9572 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9573 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9574 		}
9575 		mutex_lock(&dm->dc_lock);
9576 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9577 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9578 			amdgpu_dm_psr_disable(acrtc_state->stream);
9579 
9580 		dc_commit_updates_for_stream(dm->dc,
9581 						     bundle->surface_updates,
9582 						     planes_count,
9583 						     acrtc_state->stream,
9584 						     &bundle->stream_update,
9585 						     dc_state);
9586 
		/*
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When a pipe is power gated its interrupt enablement
		 * state is lost.
9594 		 *
9595 		 * So we need to update the IRQ control state in hardware
9596 		 * whenever the pipe turns on (since it could be previously
9597 		 * power gated) or off (since some pipes can't be power gated
9598 		 * on some ASICs).
9599 		 */
9600 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9601 			dm_update_pflip_irq_state(drm_to_adev(dev),
9602 						  acrtc_attach);
9603 
9604 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9605 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9606 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9607 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9608 
9609 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9610 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9611 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9612 			struct amdgpu_dm_connector *aconn =
9613 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9614 
9615 			if (aconn->psr_skip_count > 0)
9616 				aconn->psr_skip_count--;
9617 
9618 			/* Allow PSR when skip count is 0. */
9619 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9620 
9621 			/*
9622 			 * If sink supports PSR SU, there is no need to rely on
9623 			 * a vblank event disable request to enable PSR. PSR SU
9624 			 * can be enabled immediately once OS demonstrates an
9625 			 * adequate number of fast atomic commits to notify KMD
9626 			 * of update events. See `vblank_control_worker()`.
9627 			 */
9628 			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9629 			    acrtc_attach->dm_irq_params.allow_psr_entry &&
9630 			    !acrtc_state->stream->link->psr_settings.psr_allow_active)
9631 				amdgpu_dm_psr_enable(acrtc_state->stream);
9632 		} else {
9633 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9634 		}
9635 
9636 		mutex_unlock(&dm->dc_lock);
9637 	}
9638 
9639 	/*
9640 	 * Update cursor state *after* programming all the planes.
9641 	 * This avoids redundant programming in the case where we're going
9642 	 * to be disabling a single plane - those pipes are being disabled.
9643 	 */
9644 	if (acrtc_state->active_planes)
9645 		amdgpu_dm_commit_cursors(state);
9646 
9647 cleanup:
9648 	kfree(bundle);
9649 }
9650 
9651 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9652 				   struct drm_atomic_state *state)
9653 {
9654 	struct amdgpu_device *adev = drm_to_adev(dev);
9655 	struct amdgpu_dm_connector *aconnector;
9656 	struct drm_connector *connector;
9657 	struct drm_connector_state *old_con_state, *new_con_state;
9658 	struct drm_crtc_state *new_crtc_state;
9659 	struct dm_crtc_state *new_dm_crtc_state;
9660 	const struct dc_stream_status *status;
9661 	int i, inst;
9662 
9663 	/* Notify device removals. */
9664 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9665 		if (old_con_state->crtc != new_con_state->crtc) {
9666 			/* CRTC changes require notification. */
9667 			goto notify;
9668 		}
9669 
9670 		if (!new_con_state->crtc)
9671 			continue;
9672 
9673 		new_crtc_state = drm_atomic_get_new_crtc_state(
9674 			state, new_con_state->crtc);
9675 
9676 		if (!new_crtc_state)
9677 			continue;
9678 
9679 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9680 			continue;
9681 
9682 	notify:
9683 		aconnector = to_amdgpu_dm_connector(connector);
9684 
9685 		mutex_lock(&adev->dm.audio_lock);
9686 		inst = aconnector->audio_inst;
9687 		aconnector->audio_inst = -1;
9688 		mutex_unlock(&adev->dm.audio_lock);
9689 
9690 		amdgpu_dm_audio_eld_notify(adev, inst);
9691 	}
9692 
9693 	/* Notify audio device additions. */
9694 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9695 		if (!new_con_state->crtc)
9696 			continue;
9697 
9698 		new_crtc_state = drm_atomic_get_new_crtc_state(
9699 			state, new_con_state->crtc);
9700 
9701 		if (!new_crtc_state)
9702 			continue;
9703 
9704 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9705 			continue;
9706 
9707 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9708 		if (!new_dm_crtc_state->stream)
9709 			continue;
9710 
9711 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9712 		if (!status)
9713 			continue;
9714 
9715 		aconnector = to_amdgpu_dm_connector(connector);
9716 
9717 		mutex_lock(&adev->dm.audio_lock);
9718 		inst = status->audio_inst;
9719 		aconnector->audio_inst = inst;
9720 		mutex_unlock(&adev->dm.audio_lock);
9721 
9722 		amdgpu_dm_audio_eld_notify(adev, inst);
9723 	}
9724 }
9725 
9726 /*
9727  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9728  * @crtc_state: the DRM CRTC state
9729  * @stream_state: the DC stream state.
9730  *
9731  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9732  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9733  */
9734 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9735 						struct dc_stream_state *stream_state)
9736 {
9737 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9738 }
9739 
9740 /**
 * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This tells DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered out anything non-kosher.
9747  */
9748 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9749 {
9750 	struct drm_device *dev = state->dev;
9751 	struct amdgpu_device *adev = drm_to_adev(dev);
9752 	struct amdgpu_display_manager *dm = &adev->dm;
9753 	struct dm_atomic_state *dm_state;
9754 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9755 	uint32_t i, j;
9756 	struct drm_crtc *crtc;
9757 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9758 	unsigned long flags;
9759 	bool wait_for_vblank = true;
9760 	struct drm_connector *connector;
9761 	struct drm_connector_state *old_con_state, *new_con_state;
9762 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9763 	int crtc_disable_count = 0;
9764 	bool mode_set_reset_required = false;
9765 
9766 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9767 
9768 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9769 
9770 	dm_state = dm_atomic_get_new_state(state);
9771 	if (dm_state && dm_state->context) {
9772 		dc_state = dm_state->context;
9773 	} else {
9774 		/* No state changes, retain current state. */
9775 		dc_state_temp = dc_create_state(dm->dc);
9776 		ASSERT(dc_state_temp);
9777 		dc_state = dc_state_temp;
9778 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9779 	}
9780 
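	/*
	 * Disable interrupts and drop the old stream reference for every
	 * CRTC that is being turned off or fully modeset, before the new
	 * state is programmed.
	 */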
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9782 				       new_crtc_state, i) {
9783 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9784 
9785 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9786 
9787 		if (old_crtc_state->active &&
9788 		    (!new_crtc_state->active ||
9789 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9790 			manage_dm_interrupts(adev, acrtc, false);
9791 			dc_stream_release(dm_old_crtc_state->stream);
9792 		}
9793 	}
9794 
9795 	drm_atomic_helper_calc_timestamping_constants(state);
9796 
9797 	/* update changed items */
9798 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9799 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9800 
9801 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9802 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9803 
9804 		drm_dbg_state(state->dev,
9805 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9807 			"connectors_changed:%d\n",
9808 			acrtc->crtc_id,
9809 			new_crtc_state->enable,
9810 			new_crtc_state->active,
9811 			new_crtc_state->planes_changed,
9812 			new_crtc_state->mode_changed,
9813 			new_crtc_state->active_changed,
9814 			new_crtc_state->connectors_changed);
9815 
9816 		/* Disable cursor if disabling crtc */
9817 		if (old_crtc_state->active && !new_crtc_state->active) {
9818 			struct dc_cursor_position position;
9819 
9820 			memset(&position, 0, sizeof(position));
9821 			mutex_lock(&dm->dc_lock);
9822 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9823 			mutex_unlock(&dm->dc_lock);
9824 		}
9825 
9826 		/* Copy all transient state flags into dc state */
9827 		if (dm_new_crtc_state->stream) {
9828 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9829 							    dm_new_crtc_state->stream);
9830 		}
9831 
		/* Handle the headless hotplug case, updating new_state and
		 * the aconnector as needed.
		 */
9835 
9836 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9837 
9838 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9839 
9840 			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery.
				 * In this case userspace tries to set a mode
				 * on a display that is in fact disconnected.
				 * dc_sink is NULL on the aconnector here, and
				 * we expect a mode reset to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * while the resume sequence is still running.
				 *
				 * In this case we want to pretend we still
				 * have a sink, to keep the pipe running so
				 * that hw state stays consistent with the sw
				 * state.
				 */
9856 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9857 						__func__, acrtc->base.base.id);
9858 				continue;
9859 			}
9860 
9861 			if (dm_old_crtc_state->stream)
9862 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9863 
9864 			pm_runtime_get_noresume(dev->dev);
9865 
9866 			acrtc->enabled = true;
9867 			acrtc->hw_mode = new_crtc_state->mode;
9868 			crtc->hwmode = new_crtc_state->mode;
9869 			mode_set_reset_required = true;
9870 		} else if (modereset_required(new_crtc_state)) {
9871 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9872 			/* i.e. reset mode */
9873 			if (dm_old_crtc_state->stream)
9874 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9875 
9876 			mode_set_reset_required = true;
9877 		}
9878 	} /* for_each_crtc_in_state() */
9879 
9880 	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
9882 		if (mode_set_reset_required) {
9883 			if (dm->vblank_control_workqueue)
9884 				flush_workqueue(dm->vblank_control_workqueue);
9885 
9886 			amdgpu_dm_psr_disable_all(dm);
9887 		}
9888 
9889 		dm_enable_per_frame_crtc_master_sync(dc_state);
9890 		mutex_lock(&dm->dc_lock);
9891 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9892 
9893 		/* Allow idle optimization when vblank count is 0 for display off */
9894 		if (dm->active_vblank_irq_count == 0)
9895 			dc_allow_idle_optimizations(dm->dc, true);
9896 		mutex_unlock(&dm->dc_lock);
9897 	}
9898 
9899 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9900 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9901 
9902 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9903 
9904 		if (dm_new_crtc_state->stream != NULL) {
9905 			const struct dc_stream_status *status =
9906 					dc_stream_get_status(dm_new_crtc_state->stream);
9907 
9908 			if (!status)
9909 				status = dc_stream_get_status_from_state(dc_state,
9910 									 dm_new_crtc_state->stream);
9911 			if (!status)
9912 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9913 			else
9914 				acrtc->otg_inst = status->primary_otg_inst;
9915 		}
9916 	}
9917 #ifdef CONFIG_DRM_AMD_DC_HDCP
9918 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9919 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9920 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9921 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9922 
9923 		new_crtc_state = NULL;
9924 
9925 		if (acrtc)
9926 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9927 
9928 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9929 
9930 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9931 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9932 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9933 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9934 			dm_new_con_state->update_hdcp = true;
9935 			continue;
9936 		}
9937 
9938 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9939 			hdcp_update_display(
9940 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9941 				new_con_state->hdcp_content_type,
9942 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9943 	}
9944 #endif
9945 
9946 	/* Handle connector state changes */
9947 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9948 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9949 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9950 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9951 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9952 		struct dc_stream_update stream_update;
9953 		struct dc_info_packet hdr_packet;
9954 		struct dc_stream_status *status = NULL;
9955 		bool abm_changed, hdr_changed, scaling_changed;
9956 
9957 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9958 		memset(&stream_update, 0, sizeof(stream_update));
9959 
9960 		if (acrtc) {
9961 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9962 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9963 		}
9964 
9965 		/* Skip any modesets/resets */
9966 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9967 			continue;
9968 
9969 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9970 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9971 
9972 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9973 							     dm_old_con_state);
9974 
9975 		abm_changed = dm_new_crtc_state->abm_level !=
9976 			      dm_old_crtc_state->abm_level;
9977 
9978 		hdr_changed =
9979 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9980 
9981 		if (!scaling_changed && !abm_changed && !hdr_changed)
9982 			continue;
9983 
9984 		stream_update.stream = dm_new_crtc_state->stream;
9985 		if (scaling_changed) {
9986 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9987 					dm_new_con_state, dm_new_crtc_state->stream);
9988 
9989 			stream_update.src = dm_new_crtc_state->stream->src;
9990 			stream_update.dst = dm_new_crtc_state->stream->dst;
9991 		}
9992 
9993 		if (abm_changed) {
9994 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9995 
9996 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9997 		}
9998 
9999 		if (hdr_changed) {
10000 			fill_hdr_info_packet(new_con_state, &hdr_packet);
10001 			stream_update.hdr_static_metadata = &hdr_packet;
10002 		}
10003 
10004 		status = dc_stream_get_status(dm_new_crtc_state->stream);
10005 
10006 		if (WARN_ON(!status))
10007 			continue;
10008 
10009 		WARN_ON(!status->plane_count);
10010 
10011 		/*
10012 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
10013 		 * Here we create an empty update on each plane.
10014 		 * To fix this, DC should permit updating only stream properties.
10015 		 */
10016 		for (j = 0; j < status->plane_count; j++)
10017 			dummy_updates[j].surface = status->plane_states[0];
10018 
10019 
10020 		mutex_lock(&dm->dc_lock);
10021 		dc_commit_updates_for_stream(dm->dc,
10022 						     dummy_updates,
10023 						     status->plane_count,
10024 						     dm_new_crtc_state->stream,
10025 						     &stream_update,
10026 						     dc_state);
10027 		mutex_unlock(&dm->dc_lock);
10028 	}
10029 
10030 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
10031 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10032 				      new_crtc_state, i) {
10033 		if (old_crtc_state->active && !new_crtc_state->active)
10034 			crtc_disable_count++;
10035 
10036 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10037 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10038 
10039 		/* For freesync config update on crtc state and params for irq */
10040 		update_stream_irq_parameters(dm, dm_new_crtc_state);
10041 
10042 		/* Handle vrr on->off / off->on transitions */
10043 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10044 						dm_new_crtc_state);
10045 	}
10046 
10047 	/**
10048 	 * Enable interrupts for CRTCs that are newly enabled or went through
10049 	 * a modeset. It was intentionally deferred until after the front end
10050 	 * state was modified to wait until the OTG was on and so the IRQ
10051 	 * handlers didn't access stale or invalid state.
10052 	 */
10053 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10054 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10055 #ifdef CONFIG_DEBUG_FS
10056 		bool configure_crc = false;
10057 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
10058 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10059 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10060 #endif
10061 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10062 		cur_crc_src = acrtc->dm_irq_params.crc_src;
10063 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10064 #endif
10065 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10066 
10067 		if (new_crtc_state->active &&
10068 		    (!old_crtc_state->active ||
10069 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10070 			dc_stream_retain(dm_new_crtc_state->stream);
10071 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10072 			manage_dm_interrupts(adev, acrtc, true);
10073 
10074 #ifdef CONFIG_DEBUG_FS
10075 			/**
10076 			 * Frontend may have changed so reapply the CRC capture
10077 			 * settings for the stream.
10078 			 */
10079 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10080 
10081 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10082 				configure_crc = true;
10083 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10084 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
10085 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10086 					acrtc->dm_irq_params.crc_window.update_win = true;
10087 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10088 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10089 					crc_rd_wrk->crtc = crtc;
10090 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10091 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10092 				}
10093 #endif
10094 			}
10095 
10096 			if (configure_crc)
10097 				if (amdgpu_dm_crtc_configure_crc_source(
10098 					crtc, dm_new_crtc_state, cur_crc_src))
10099 					DRM_DEBUG_DRIVER("Failed to configure crc source");
10100 #endif
10101 		}
10102 	}
10103 
10104 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10105 		if (new_crtc_state->async_flip)
10106 			wait_for_vblank = false;
10107 
10108 	/* update planes when needed per crtc*/
10109 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10110 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10111 
10112 		if (dm_new_crtc_state->stream)
10113 			amdgpu_dm_commit_planes(state, dc_state, dev,
10114 						dm, crtc, wait_for_vblank);
10115 	}
10116 
10117 	/* Update audio instances for each connector. */
10118 	amdgpu_dm_commit_audio(dev, state);
10119 
10120 	/* restore the backlight level */
10121 	for (i = 0; i < dm->num_of_edps; i++) {
10122 		if (dm->backlight_dev[i] &&
10123 		    (dm->actual_brightness[i] != dm->brightness[i]))
10124 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10125 	}
10126 
10127 	/*
10128 	 * send vblank event on all events not handled in flip and
10129 	 * mark consumed event for drm_atomic_helper_commit_hw_done
10130 	 */
10131 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10132 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10133 
10134 		if (new_crtc_state->event)
10135 			drm_send_event_locked(dev, &new_crtc_state->event->base);
10136 
10137 		new_crtc_state->event = NULL;
10138 	}
10139 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10140 
10141 	/* Signal HW programming completion */
10142 	drm_atomic_helper_commit_hw_done(state);
10143 
10144 	if (wait_for_vblank)
10145 		drm_atomic_helper_wait_for_flip_done(dev, state);
10146 
10147 	drm_atomic_helper_cleanup_planes(dev, state);
10148 
10149 	/* return the stolen vga memory back to VRAM */
10150 	if (!adev->mman.keep_stolen_vga_memory)
10151 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10152 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10153 
10154 	/*
10155 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10156 	 * so we can put the GPU into runtime suspend if we're not driving any
10157 	 * displays anymore
10158 	 */
10159 	for (i = 0; i < crtc_disable_count; i++)
10160 		pm_runtime_put_autosuspend(dev->dev);
10161 	pm_runtime_mark_last_busy(dev->dev);
10162 
10163 	if (dc_state_temp)
10164 		dc_release_state(dc_state_temp);
10165 }
10166 
10167 
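/*
 * Build a minimal atomic state containing the given connector, its CRTC and
 * the CRTC's primary plane, force a mode restore on that CRTC and commit it.
 * Used to bring a display back up when no set-mode request arrives from
 * userspace.
 */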
10168 static int dm_force_atomic_commit(struct drm_connector *connector)
10169 {
10170 	int ret = 0;
10171 	struct drm_device *ddev = connector->dev;
10172 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10173 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10174 	struct drm_plane *plane = disconnected_acrtc->base.primary;
10175 	struct drm_connector_state *conn_state;
10176 	struct drm_crtc_state *crtc_state;
10177 	struct drm_plane_state *plane_state;
10178 
10179 	if (!state)
10180 		return -ENOMEM;
10181 
10182 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10183 
	/* Construct an atomic state to restore the previous display settings */
10185 
10186 	/*
10187 	 * Attach connectors to drm_atomic_state
10188 	 */
10189 	conn_state = drm_atomic_get_connector_state(state, connector);
10190 
10191 	ret = PTR_ERR_OR_ZERO(conn_state);
10192 	if (ret)
10193 		goto out;
10194 
10195 	/* Attach crtc to drm_atomic_state*/
10196 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10197 
10198 	ret = PTR_ERR_OR_ZERO(crtc_state);
10199 	if (ret)
10200 		goto out;
10201 
10202 	/* force a restore */
10203 	crtc_state->mode_changed = true;
10204 
10205 	/* Attach plane to drm_atomic_state */
10206 	plane_state = drm_atomic_get_plane_state(state, plane);
10207 
10208 	ret = PTR_ERR_OR_ZERO(plane_state);
10209 	if (ret)
10210 		goto out;
10211 
10212 	/* Call commit internally with the state we just constructed */
10213 	ret = drm_atomic_commit(state);
10214 
10215 out:
10216 	drm_atomic_state_put(state);
10217 	if (ret)
10218 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10219 
10220 	return ret;
10221 }
10222 
10223 /*
10224  * This function handles all cases when set mode does not come upon hotplug.
10225  * This includes when a display is unplugged then plugged back into the
10226  * same port and when running without usermode desktop manager supprot
10227  */
10228 void dm_restore_drm_connector_state(struct drm_device *dev,
10229 				    struct drm_connector *connector)
10230 {
10231 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10232 	struct amdgpu_crtc *disconnected_acrtc;
10233 	struct dm_crtc_state *acrtc_state;
10234 
10235 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10236 		return;
10237 
10238 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10239 	if (!disconnected_acrtc)
10240 		return;
10241 
10242 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10243 	if (!acrtc_state->stream)
10244 		return;
10245 
10246 	/*
10247 	 * If the previous sink is not released and different from the current,
10248 	 * we deduce we are in a state where we can not rely on usermode call
10249 	 * to turn on the display, so we do it here
10250 	 */
10251 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10252 		dm_force_atomic_commit(&aconnector->base);
10253 }
10254 
10255 /*
10256  * Grabs all modesetting locks to serialize against any blocking commits,
10257  * Waits for completion of all non blocking commits.
10258  */
10259 static int do_aquire_global_lock(struct drm_device *dev,
10260 				 struct drm_atomic_state *state)
10261 {
10262 	struct drm_crtc *crtc;
10263 	struct drm_crtc_commit *commit;
10264 	long ret;
10265 
10266 	/*
10267 	 * Adding all modeset locks to aquire_ctx will
10268 	 * ensure that when the framework release it the
10269 	 * extra locks we are locking here will get released to
10270 	 */
10271 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10272 	if (ret)
10273 		return ret;
10274 
10275 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10276 		spin_lock(&crtc->commit_lock);
10277 		commit = list_first_entry_or_null(&crtc->commit_list,
10278 				struct drm_crtc_commit, commit_entry);
10279 		if (commit)
10280 			drm_crtc_commit_get(commit);
10281 		spin_unlock(&crtc->commit_lock);
10282 
10283 		if (!commit)
10284 			continue;
10285 
10286 		/*
10287 		 * Make sure all pending HW programming completed and
10288 		 * page flips done
10289 		 */
10290 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10291 
10292 		if (ret > 0)
10293 			ret = wait_for_completion_interruptible_timeout(
10294 					&commit->flip_done, 10*HZ);
10295 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
10299 
10300 		drm_crtc_commit_put(commit);
10301 	}
10302 
10303 	return ret < 0 ? ret : 0;
10304 }
10305 
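/*
 * Derive the freesync configuration for a CRTC from its connector state:
 * VRR is supported when the connector is freesync capable and the mode's
 * refresh rate lies within [min_vfreq, max_vfreq]. The resulting state is
 * ACTIVE_FIXED for freesync video modes, ACTIVE_VARIABLE when userspace has
 * enabled VRR, and INACTIVE otherwise.
 */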
10306 static void get_freesync_config_for_crtc(
10307 	struct dm_crtc_state *new_crtc_state,
10308 	struct dm_connector_state *new_con_state)
10309 {
10310 	struct mod_freesync_config config = {0};
10311 	struct amdgpu_dm_connector *aconnector =
10312 			to_amdgpu_dm_connector(new_con_state->base.connector);
10313 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10314 	int vrefresh = drm_mode_vrefresh(mode);
10315 	bool fs_vid_mode = false;
10316 
10317 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10318 					vrefresh >= aconnector->min_vfreq &&
10319 					vrefresh <= aconnector->max_vfreq;
10320 
10321 	if (new_crtc_state->vrr_supported) {
10322 		new_crtc_state->stream->ignore_msa_timing_param = true;
10323 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10324 
10325 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10326 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10327 		config.vsif_supported = true;
10328 		config.btr = true;
10329 
10330 		if (fs_vid_mode) {
10331 			config.state = VRR_STATE_ACTIVE_FIXED;
10332 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10333 			goto out;
10334 		} else if (new_crtc_state->base.vrr_enabled) {
10335 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10336 		} else {
10337 			config.state = VRR_STATE_INACTIVE;
10338 		}
10339 	}
10340 out:
10341 	new_crtc_state->freesync_config = config;
10342 }
10343 
10344 static void reset_freesync_config_for_crtc(
10345 	struct dm_crtc_state *new_crtc_state)
10346 {
10347 	new_crtc_state->vrr_supported = false;
10348 
10349 	memset(&new_crtc_state->vrr_infopacket, 0,
10350 	       sizeof(new_crtc_state->vrr_infopacket));
10351 }
10352 
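/*
 * Return true when the new mode differs from the old one only in its vertical
 * blanking: vtotal, vsync_start and vsync_end change while the vsync pulse
 * width and all horizontal timing parameters stay the same. In other words,
 * only the vertical front porch moved, which is the freesync video mode case.
 */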
10353 static bool
10354 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10355 				 struct drm_crtc_state *new_crtc_state)
10356 {
10357 	const struct drm_display_mode *old_mode, *new_mode;
10358 
10359 	if (!old_crtc_state || !new_crtc_state)
10360 		return false;
10361 
10362 	old_mode = &old_crtc_state->mode;
10363 	new_mode = &new_crtc_state->mode;
10364 
10365 	if (old_mode->clock       == new_mode->clock &&
10366 	    old_mode->hdisplay    == new_mode->hdisplay &&
10367 	    old_mode->vdisplay    == new_mode->vdisplay &&
10368 	    old_mode->htotal      == new_mode->htotal &&
10369 	    old_mode->vtotal      != new_mode->vtotal &&
10370 	    old_mode->hsync_start == new_mode->hsync_start &&
10371 	    old_mode->vsync_start != new_mode->vsync_start &&
10372 	    old_mode->hsync_end   == new_mode->hsync_end &&
10373 	    old_mode->vsync_end   != new_mode->vsync_end &&
10374 	    old_mode->hskew       == new_mode->hskew &&
10375 	    old_mode->vscan       == new_mode->vscan &&
10376 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10377 	    (new_mode->vsync_end - new_mode->vsync_start))
10378 		return true;
10379 
10380 	return false;
10381 }
10382 
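/*
 * Force the CRTC into VRR_STATE_ACTIVE_FIXED and derive the fixed refresh
 * rate in uHz from the mode timing:
 *
 *   fixed_refresh_in_uhz = clock[kHz] * 1000 * 1000000 / (htotal * vtotal)
 *
 * e.g. a 148500 kHz mode with 2200 x 1125 total pixels yields
 * 60000000 uHz (60 Hz).
 */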
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10384 	uint64_t num, den, res;
10385 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10386 
10387 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10388 
10389 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10390 	den = (unsigned long long)new_crtc_state->mode.htotal *
10391 	      (unsigned long long)new_crtc_state->mode.vtotal;
10392 
10393 	res = div_u64(num, den);
10394 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10395 }
10396 
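/*
 * Add, remove or update the dc stream for one CRTC in the DM atomic state.
 * When @enable is false the old stream is removed from the dc context; when
 * it is true a new stream is created and validated against the attached
 * connector and added to the context. Non-modeset updates (scaling, ABM,
 * color management and freesync config) are applied to the new crtc state at
 * the end. *lock_and_validation_needed is set whenever the dc context changed.
 */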
10397 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10398 			 struct drm_atomic_state *state,
10399 			 struct drm_crtc *crtc,
10400 			 struct drm_crtc_state *old_crtc_state,
10401 			 struct drm_crtc_state *new_crtc_state,
10402 			 bool enable,
10403 			 bool *lock_and_validation_needed)
10404 {
10405 	struct dm_atomic_state *dm_state = NULL;
10406 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10407 	struct dc_stream_state *new_stream;
10408 	int ret = 0;
10409 
10410 	/*
10411 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10412 	 * update changed items
10413 	 */
10414 	struct amdgpu_crtc *acrtc = NULL;
10415 	struct amdgpu_dm_connector *aconnector = NULL;
10416 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10417 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10418 
10419 	new_stream = NULL;
10420 
10421 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10422 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10423 	acrtc = to_amdgpu_crtc(crtc);
10424 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10425 
10426 	/* TODO This hack should go away */
10427 	if (aconnector && enable) {
10428 		/* Make sure fake sink is created in plug-in scenario */
10429 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10430 							    &aconnector->base);
10431 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10432 							    &aconnector->base);
10433 
10434 		if (IS_ERR(drm_new_conn_state)) {
10435 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10436 			goto fail;
10437 		}
10438 
10439 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10440 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10441 
10442 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10443 			goto skip_modeset;
10444 
10445 		new_stream = create_validate_stream_for_sink(aconnector,
10446 							     &new_crtc_state->mode,
10447 							     dm_new_conn_state,
10448 							     dm_old_crtc_state->stream);
10449 
10450 		/*
10451 		 * we can have no stream on ACTION_SET if a display
10452 		 * was disconnected during S3, in this case it is not an
10453 		 * error, the OS will be updated after detection, and
10454 		 * will do the right thing on next atomic commit
10455 		 */
10456 
10457 		if (!new_stream) {
10458 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10459 					__func__, acrtc->base.base.id);
10460 			ret = -ENOMEM;
10461 			goto fail;
10462 		}
10463 
10464 		/*
10465 		 * TODO: Check VSDB bits to decide whether this should
10466 		 * be enabled or not.
10467 		 */
10468 		new_stream->triggered_crtc_reset.enabled =
10469 			dm->force_timing_sync;
10470 
10471 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10472 
10473 		ret = fill_hdr_info_packet(drm_new_conn_state,
10474 					   &new_stream->hdr_static_metadata);
10475 		if (ret)
10476 			goto fail;
10477 
10478 		/*
10479 		 * If we already removed the old stream from the context
10480 		 * (and set the new stream to NULL) then we can't reuse
10481 		 * the old stream even if the stream and scaling are unchanged.
10482 		 * We'll hit the BUG_ON and black screen.
10483 		 *
10484 		 * TODO: Refactor this function to allow this check to work
10485 		 * in all conditions.
10486 		 */
10487 		if (dm_new_crtc_state->stream &&
10488 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10489 			goto skip_modeset;
10490 
10491 		if (dm_new_crtc_state->stream &&
10492 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10493 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10494 			new_crtc_state->mode_changed = false;
10495 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10496 					 new_crtc_state->mode_changed);
10497 		}
10498 	}
10499 
10500 	/* mode_changed flag may get updated above, need to check again */
10501 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10502 		goto skip_modeset;
10503 
	drm_dbg_state(state->dev,
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);
10515 
10516 	/* Remove stream for any changed/disabled CRTC */
10517 	if (!enable) {
10518 
10519 		if (!dm_old_crtc_state->stream)
10520 			goto skip_modeset;
10521 
10522 		if (dm_new_crtc_state->stream &&
10523 		    is_timing_unchanged_for_freesync(new_crtc_state,
10524 						     old_crtc_state)) {
10525 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
10530 
10531 			set_freesync_fixed_config(dm_new_crtc_state);
10532 
10533 			goto skip_modeset;
10534 		} else if (aconnector &&
10535 			   is_freesync_video_mode(&new_crtc_state->mode,
10536 						  aconnector)) {
10537 			struct drm_display_mode *high_mode;
10538 
10539 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
				set_freesync_fixed_config(dm_new_crtc_state);
10543 		}
10544 
10545 		ret = dm_atomic_get_state(state, &dm_state);
10546 		if (ret)
10547 			goto fail;
10548 
10549 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10550 				crtc->base.id);
10551 
10552 		/* i.e. reset mode */
10553 		if (dc_remove_stream_from_ctx(
10554 				dm->dc,
10555 				dm_state->context,
10556 				dm_old_crtc_state->stream) != DC_OK) {
10557 			ret = -EINVAL;
10558 			goto fail;
10559 		}
10560 
10561 		dc_stream_release(dm_old_crtc_state->stream);
10562 		dm_new_crtc_state->stream = NULL;
10563 
10564 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10565 
10566 		*lock_and_validation_needed = true;
10567 
10568 	} else {/* Add stream for any updated/enabled CRTC */
10569 		/*
10570 		 * Quick fix to prevent NULL pointer on new_stream when
10571 		 * added MST connectors not found in existing crtc_state in the chained mode
10572 		 * TODO: need to dig out the root cause of that
10573 		 */
10574 		if (!aconnector)
10575 			goto skip_modeset;
10576 
10577 		if (modereset_required(new_crtc_state))
10578 			goto skip_modeset;
10579 
10580 		if (modeset_required(new_crtc_state, new_stream,
10581 				     dm_old_crtc_state->stream)) {
10582 
10583 			WARN_ON(dm_new_crtc_state->stream);
10584 
10585 			ret = dm_atomic_get_state(state, &dm_state);
10586 			if (ret)
10587 				goto fail;
10588 
10589 			dm_new_crtc_state->stream = new_stream;
10590 
10591 			dc_stream_retain(new_stream);
10592 
10593 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10594 					 crtc->base.id);
10595 
10596 			if (dc_add_stream_to_ctx(
10597 					dm->dc,
10598 					dm_state->context,
10599 					dm_new_crtc_state->stream) != DC_OK) {
10600 				ret = -EINVAL;
10601 				goto fail;
10602 			}
10603 
10604 			*lock_and_validation_needed = true;
10605 		}
10606 	}
10607 
10608 skip_modeset:
10609 	/* Release extra reference */
10610 	if (new_stream)
		dc_stream_release(new_stream);
10612 
10613 	/*
10614 	 * We want to do dc stream updates that do not require a
10615 	 * full modeset below.
10616 	 */
10617 	if (!(enable && aconnector && new_crtc_state->active))
10618 		return 0;
10619 	/*
10620 	 * Given above conditions, the dc state cannot be NULL because:
10621 	 * 1. We're in the process of enabling CRTCs (just been added
10622 	 *    to the dc context, or already is on the context)
10623 	 * 2. Has a valid connector attached, and
10624 	 * 3. Is currently active and enabled.
10625 	 * => The dc stream state currently exists.
10626 	 */
10627 	BUG_ON(dm_new_crtc_state->stream == NULL);
10628 
10629 	/* Scaling or underscan settings */
10630 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10631 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10632 		update_stream_scaling_settings(
10633 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10634 
10635 	/* ABM settings */
10636 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10637 
10638 	/*
10639 	 * Color management settings. We also update color properties
10640 	 * when a modeset is needed, to ensure it gets reprogrammed.
10641 	 */
10642 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10643 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10644 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10645 		if (ret)
10646 			goto fail;
10647 	}
10648 
10649 	/* Update Freesync settings. */
10650 	get_freesync_config_for_crtc(dm_new_crtc_state,
10651 				     dm_new_conn_state);
10652 
10653 	return ret;
10654 
10655 fail:
10656 	if (new_stream)
10657 		dc_stream_release(new_stream);
10658 	return ret;
10659 }
10660 
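/*
 * Decide whether DC has to remove and re-create all planes on the stream for
 * this update. This is the case for modesets, color management changes and
 * any plane change that can affect z-order or bandwidth (CRTC assignment,
 * size/scaling, rotation, blending, alpha, colorspace, pixel format, tiling
 * or DCC).
 */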
10661 static bool should_reset_plane(struct drm_atomic_state *state,
10662 			       struct drm_plane *plane,
10663 			       struct drm_plane_state *old_plane_state,
10664 			       struct drm_plane_state *new_plane_state)
10665 {
10666 	struct drm_plane *other;
10667 	struct drm_plane_state *old_other_state, *new_other_state;
10668 	struct drm_crtc_state *new_crtc_state;
10669 	int i;
10670 
10671 	/*
10672 	 * TODO: Remove this hack once the checks below are sufficient
10673 	 * enough to determine when we need to reset all the planes on
10674 	 * the stream.
10675 	 */
10676 	if (state->allow_modeset)
10677 		return true;
10678 
10679 	/* Exit early if we know that we're adding or removing the plane. */
10680 	if (old_plane_state->crtc != new_plane_state->crtc)
10681 		return true;
10682 
10683 	/* old crtc == new_crtc == NULL, plane not in context. */
10684 	if (!new_plane_state->crtc)
10685 		return false;
10686 
10687 	new_crtc_state =
10688 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10689 
10690 	if (!new_crtc_state)
10691 		return true;
10692 
10693 	/* CRTC Degamma changes currently require us to recreate planes. */
10694 	if (new_crtc_state->color_mgmt_changed)
10695 		return true;
10696 
10697 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10698 		return true;
10699 
10700 	/*
10701 	 * If there are any new primary or overlay planes being added or
10702 	 * removed then the z-order can potentially change. To ensure
10703 	 * correct z-order and pipe acquisition the current DC architecture
10704 	 * requires us to remove and recreate all existing planes.
10705 	 *
10706 	 * TODO: Come up with a more elegant solution for this.
10707 	 */
10708 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10711 			continue;
10712 
10713 		if (old_other_state->crtc != new_plane_state->crtc &&
10714 		    new_other_state->crtc != new_plane_state->crtc)
10715 			continue;
10716 
10717 		if (old_other_state->crtc != new_other_state->crtc)
10718 			return true;
10719 
10720 		/* Src/dst size and scaling updates. */
10721 		if (old_other_state->src_w != new_other_state->src_w ||
10722 		    old_other_state->src_h != new_other_state->src_h ||
10723 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10724 		    old_other_state->crtc_h != new_other_state->crtc_h)
10725 			return true;
10726 
10727 		/* Rotation / mirroring updates. */
10728 		if (old_other_state->rotation != new_other_state->rotation)
10729 			return true;
10730 
10731 		/* Blending updates. */
10732 		if (old_other_state->pixel_blend_mode !=
10733 		    new_other_state->pixel_blend_mode)
10734 			return true;
10735 
10736 		/* Alpha updates. */
10737 		if (old_other_state->alpha != new_other_state->alpha)
10738 			return true;
10739 
10740 		/* Colorspace changes. */
10741 		if (old_other_state->color_range != new_other_state->color_range ||
10742 		    old_other_state->color_encoding != new_other_state->color_encoding)
10743 			return true;
10744 
10745 		/* Framebuffer checks fall at the end. */
10746 		if (!old_other_state->fb || !new_other_state->fb)
10747 			continue;
10748 
10749 		/* Pixel format changes can require bandwidth updates. */
10750 		if (old_other_state->fb->format != new_other_state->fb->format)
10751 			return true;
10752 
10753 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10754 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10755 
10756 		/* Tiling and DCC changes also require bandwidth updates. */
10757 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10758 		    old_afb->base.modifier != new_afb->base.modifier)
10759 			return true;
10760 	}
10761 
10762 	return false;
10763 }
10764 
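/*
 * Validate a cursor framebuffer: it must fit within the hardware cursor
 * limits, may not be cropped or scaled, its pitch must equal its width and be
 * 64, 128 or 256 pixels, and it must be linear (verified via the tiling flags
 * when the FB carries no modifier).
 */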
10765 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10766 			      struct drm_plane_state *new_plane_state,
10767 			      struct drm_framebuffer *fb)
10768 {
10769 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10770 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10771 	unsigned int pitch;
10772 	bool linear;
10773 
10774 	if (fb->width > new_acrtc->max_cursor_width ||
10775 	    fb->height > new_acrtc->max_cursor_height) {
10776 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10777 				 new_plane_state->fb->width,
10778 				 new_plane_state->fb->height);
10779 		return -EINVAL;
10780 	}
10781 	if (new_plane_state->src_w != fb->width << 16 ||
10782 	    new_plane_state->src_h != fb->height << 16) {
10783 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10784 		return -EINVAL;
10785 	}
10786 
10787 	/* Pitch in pixels */
10788 	pitch = fb->pitches[0] / fb->format->cpp[0];
10789 
10790 	if (fb->width != pitch) {
10791 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10792 				 fb->width, pitch);
10793 		return -EINVAL;
10794 	}
10795 
10796 	switch (pitch) {
10797 	case 64:
10798 	case 128:
10799 	case 256:
10800 		/* FB pitch is supported by cursor plane */
10801 		break;
10802 	default:
10803 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10804 		return -EINVAL;
10805 	}
10806 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10809 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10810 		if (adev->family < AMDGPU_FAMILY_AI) {
10811 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10812 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10813 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10814 		} else {
10815 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10816 		}
10817 		if (!linear) {
10818 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10819 			return -EINVAL;
10820 		}
10821 	}
10822 
10823 	return 0;
10824 }
10825 
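/*
 * Plane counterpart of dm_update_crtc_state(): remove the dc plane state from
 * the context when disabling, or create, fill and add a new dc plane state
 * when enabling, forcing a full surface update. Cursor planes are only
 * sanity-checked here since they are not backed by a dc plane.
 * *lock_and_validation_needed is set whenever the dc context changed.
 */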
10826 static int dm_update_plane_state(struct dc *dc,
10827 				 struct drm_atomic_state *state,
10828 				 struct drm_plane *plane,
10829 				 struct drm_plane_state *old_plane_state,
10830 				 struct drm_plane_state *new_plane_state,
10831 				 bool enable,
10832 				 bool *lock_and_validation_needed)
10833 {
10834 
10835 	struct dm_atomic_state *dm_state = NULL;
10836 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10837 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10838 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10839 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10840 	struct amdgpu_crtc *new_acrtc;
10841 	bool needs_reset;
10842 	int ret = 0;
10843 
10844 
10845 	new_plane_crtc = new_plane_state->crtc;
10846 	old_plane_crtc = old_plane_state->crtc;
10847 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10848 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10849 
10850 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10851 		if (!enable || !new_plane_crtc ||
10852 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10853 			return 0;
10854 
10855 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10856 
10857 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10858 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10859 			return -EINVAL;
10860 		}
10861 
10862 		if (new_plane_state->fb) {
10863 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10864 						 new_plane_state->fb);
10865 			if (ret)
10866 				return ret;
10867 		}
10868 
10869 		return 0;
10870 	}
10871 
10872 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10873 					 new_plane_state);
10874 
10875 	/* Remove any changed/removed planes */
10876 	if (!enable) {
10877 		if (!needs_reset)
10878 			return 0;
10879 
10880 		if (!old_plane_crtc)
10881 			return 0;
10882 
10883 		old_crtc_state = drm_atomic_get_old_crtc_state(
10884 				state, old_plane_crtc);
10885 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10886 
10887 		if (!dm_old_crtc_state->stream)
10888 			return 0;
10889 
10890 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10891 				plane->base.id, old_plane_crtc->base.id);
10892 
10893 		ret = dm_atomic_get_state(state, &dm_state);
10894 		if (ret)
10895 			return ret;
10896 
10897 		if (!dc_remove_plane_from_context(
10898 				dc,
10899 				dm_old_crtc_state->stream,
10900 				dm_old_plane_state->dc_state,
10901 				dm_state->context)) {
10902 
10903 			return -EINVAL;
10904 		}
10905 
10906 
10907 		dc_plane_state_release(dm_old_plane_state->dc_state);
10908 		dm_new_plane_state->dc_state = NULL;
10909 
10910 		*lock_and_validation_needed = true;
10911 
10912 	} else { /* Add new planes */
10913 		struct dc_plane_state *dc_new_plane_state;
10914 
10915 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10916 			return 0;
10917 
10918 		if (!new_plane_crtc)
10919 			return 0;
10920 
10921 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10922 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10923 
10924 		if (!dm_new_crtc_state->stream)
10925 			return 0;
10926 
10927 		if (!needs_reset)
10928 			return 0;
10929 
10930 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10931 		if (ret)
10932 			return ret;
10933 
10934 		WARN_ON(dm_new_plane_state->dc_state);
10935 
10936 		dc_new_plane_state = dc_create_plane_state(dc);
10937 		if (!dc_new_plane_state)
10938 			return -ENOMEM;
10939 
10940 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10941 				 plane->base.id, new_plane_crtc->base.id);
10942 
10943 		ret = fill_dc_plane_attributes(
10944 			drm_to_adev(new_plane_crtc->dev),
10945 			dc_new_plane_state,
10946 			new_plane_state,
10947 			new_crtc_state);
10948 		if (ret) {
10949 			dc_plane_state_release(dc_new_plane_state);
10950 			return ret;
10951 		}
10952 
10953 		ret = dm_atomic_get_state(state, &dm_state);
10954 		if (ret) {
10955 			dc_plane_state_release(dc_new_plane_state);
10956 			return ret;
10957 		}
10958 
10959 		/*
10960 		 * Any atomic check errors that occur after this will
10961 		 * not need a release. The plane state will be attached
10962 		 * to the stream, and therefore part of the atomic
10963 		 * state. It'll be released when the atomic state is
10964 		 * cleaned.
10965 		 */
10966 		if (!dc_add_plane_to_context(
10967 				dc,
10968 				dm_new_crtc_state->stream,
10969 				dc_new_plane_state,
10970 				dm_state->context)) {
10971 
10972 			dc_plane_state_release(dc_new_plane_state);
10973 			return -EINVAL;
10974 		}
10975 
10976 		dm_new_plane_state->dc_state = dc_new_plane_state;
10977 
10978 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10979 
10980 		/* Tell DC to do a full surface update every time there
10981 		 * is a plane change. Inefficient, but works for now.
10982 		 */
10983 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10984 
10985 		*lock_and_validation_needed = true;
10986 	}
10987 
10988 
10989 	return ret;
10990 }
10991 
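/*
 * Return the plane source size in pixels, with width and height swapped for
 * 90/270 degree rotations.
 */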
10992 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10993 				       int *src_w, int *src_h)
10994 {
10995 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10996 	case DRM_MODE_ROTATE_90:
10997 	case DRM_MODE_ROTATE_270:
10998 		*src_w = plane_state->src_h >> 16;
10999 		*src_h = plane_state->src_w >> 16;
11000 		break;
11001 	case DRM_MODE_ROTATE_0:
11002 	case DRM_MODE_ROTATE_180:
11003 	default:
11004 		*src_w = plane_state->src_w >> 16;
11005 		*src_h = plane_state->src_h >> 16;
11006 		break;
11007 	}
11008 }
11009 
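/*
 * Compare the cursor plane's scale factor (in 1/1000 units) against every
 * enabled non-cursor plane on the CRTC and reject the commit when they
 * differ, since the cursor inherits the scaling of the underlying pipe.
 */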
11010 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
11011 				struct drm_crtc *crtc,
11012 				struct drm_crtc_state *new_crtc_state)
11013 {
11014 	struct drm_plane *cursor = crtc->cursor, *underlying;
11015 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
11016 	int i;
11017 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
11018 	int cursor_src_w, cursor_src_h;
11019 	int underlying_src_w, underlying_src_h;
11020 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it inherits the scaling and positioning from
	 * the underlying pipe. Check that the cursor plane's scaling matches
	 * that of the underlying planes.
	 */
11025 
11026 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;
11030 
11031 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11032 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11033 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
11034 
11035 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11036 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
11037 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11038 			continue;
11039 
11040 		/* Ignore disabled planes */
11041 		if (!new_underlying_state->fb)
11042 			continue;
11043 
11044 		dm_get_oriented_plane_size(new_underlying_state,
11045 					   &underlying_src_w, &underlying_src_h);
11046 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11047 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
11048 
11049 		if (cursor_scale_w != underlying_scale_w ||
11050 		    cursor_scale_h != underlying_scale_h) {
11051 			drm_dbg_atomic(crtc->dev,
11052 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11053 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11054 			return -EINVAL;
11055 		}
11056 
11057 		/* If this plane covers the whole CRTC, no need to check planes underneath */
11058 		if (new_underlying_state->crtc_x <= 0 &&
11059 		    new_underlying_state->crtc_y <= 0 &&
11060 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11061 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11062 			break;
11063 	}
11064 
11065 	return 0;
11066 }
11067 
11068 #if defined(CONFIG_DRM_AMD_DC_DCN)
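/*
 * Find the MST connector driving @crtc in @state and, if there is one, add
 * every CRTC on the same MST topology whose DSC configuration may be affected
 * by this change.
 */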
11069 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11070 {
11071 	struct drm_connector *connector;
11072 	struct drm_connector_state *conn_state, *old_conn_state;
11073 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11076 		if (!conn_state->crtc)
11077 			conn_state = old_conn_state;
11078 
11079 		if (conn_state->crtc != crtc)
11080 			continue;
11081 
11082 		aconnector = to_amdgpu_dm_connector(connector);
11083 		if (!aconnector->port || !aconnector->mst_port)
11084 			aconnector = NULL;
11085 		else
11086 			break;
11087 	}
11088 
11089 	if (!aconnector)
11090 		return 0;
11091 
11092 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11093 }
11094 #endif
11095 
11096 /**
11097  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
11098  * @dev: The DRM device
11099  * @state: The atomic state to commit
11100  *
11101  * Validate that the given atomic state is programmable by DC into hardware.
11102  * This involves constructing a &struct dc_state reflecting the new hardware
11103  * state we wish to commit, then querying DC to see if it is programmable. It's
11104  * important not to modify the existing DC state. Otherwise, atomic_check
11105  * may unexpectedly commit hardware changes.
11106  *
11107  * When validating the DC state, it's important that the right locks are
11108  * acquired. For full updates case which removes/adds/updates streams on one
11109  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
11110  * that any such full update commit will wait for completion of any outstanding
11111  * flip using DRMs synchronization events.
11112  *
11113  * Note that DM adds the affected connectors for all CRTCs in state, when that
11114  * might not seem necessary. This is because DC stream creation requires the
11115  * DC sink, which is tied to the DRM connector state. Cleaning this up should
11116  * be possible but non-trivial - a possible TODO item.
11117  *
 * Return: 0 on success, or a negative error code if validation failed.
11119  */
11120 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11121 				  struct drm_atomic_state *state)
11122 {
11123 	struct amdgpu_device *adev = drm_to_adev(dev);
11124 	struct dm_atomic_state *dm_state = NULL;
11125 	struct dc *dc = adev->dm.dc;
11126 	struct drm_connector *connector;
11127 	struct drm_connector_state *old_con_state, *new_con_state;
11128 	struct drm_crtc *crtc;
11129 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11130 	struct drm_plane *plane;
11131 	struct drm_plane_state *old_plane_state, *new_plane_state;
11132 	enum dc_status status;
11133 	int ret, i;
11134 	bool lock_and_validation_needed = false;
11135 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11136 #if defined(CONFIG_DRM_AMD_DC_DCN)
11137 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
11138 	struct drm_dp_mst_topology_state *mst_state;
11139 	struct drm_dp_mst_topology_mgr *mgr;
11140 #endif
11141 
11142 	trace_amdgpu_dm_atomic_check_begin(state);
11143 
11144 	ret = drm_atomic_helper_check_modeset(dev, state);
11145 	if (ret) {
11146 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11147 		goto fail;
11148 	}
11149 
11150 	/* Check connector changes */
11151 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11152 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11153 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11154 
11155 		/* Skip connectors that are disabled or part of modeset already. */
11156 		if (!old_con_state->crtc && !new_con_state->crtc)
11157 			continue;
11158 
11159 		if (!new_con_state->crtc)
11160 			continue;
11161 
11162 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11163 		if (IS_ERR(new_crtc_state)) {
11164 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11165 			ret = PTR_ERR(new_crtc_state);
11166 			goto fail;
11167 		}
11168 
11169 		if (dm_old_con_state->abm_level !=
11170 		    dm_new_con_state->abm_level)
11171 			new_crtc_state->connectors_changed = true;
11172 	}
11173 
11174 #if defined(CONFIG_DRM_AMD_DC_DCN)
11175 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11176 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11177 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11178 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11179 				if (ret) {
11180 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11181 					goto fail;
11182 				}
11183 			}
11184 		}
11185 		if (!pre_validate_dsc(state, &dm_state, vars)) {
11186 			ret = -EINVAL;
11187 			goto fail;
11188 		}
11189 	}
11190 #endif
11191 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11192 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11193 
11194 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11195 		    !new_crtc_state->color_mgmt_changed &&
11196 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11197 			dm_old_crtc_state->dsc_force_changed == false)
11198 			continue;
11199 
11200 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11201 		if (ret) {
11202 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11203 			goto fail;
11204 		}
11205 
11206 		if (!new_crtc_state->enable)
11207 			continue;
11208 
11209 		ret = drm_atomic_add_affected_connectors(state, crtc);
11210 		if (ret) {
11211 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11212 			goto fail;
11213 		}
11214 
11215 		ret = drm_atomic_add_affected_planes(state, crtc);
11216 		if (ret) {
11217 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11218 			goto fail;
11219 		}
11220 
11221 		if (dm_old_crtc_state->dsc_force_changed)
11222 			new_crtc_state->mode_changed = true;
11223 	}
11224 
11225 	/*
11226 	 * Add all primary and overlay planes on the CRTC to the state
11227 	 * whenever a plane is enabled to maintain correct z-ordering
11228 	 * and to enable fast surface updates.
11229 	 */
11230 	drm_for_each_crtc(crtc, dev) {
11231 		bool modified = false;
11232 
11233 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11234 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11235 				continue;
11236 
11237 			if (new_plane_state->crtc == crtc ||
11238 			    old_plane_state->crtc == crtc) {
11239 				modified = true;
11240 				break;
11241 			}
11242 		}
11243 
11244 		if (!modified)
11245 			continue;
11246 
11247 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11248 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11249 				continue;
11250 
11251 			new_plane_state =
11252 				drm_atomic_get_plane_state(state, plane);
11253 
11254 			if (IS_ERR(new_plane_state)) {
11255 				ret = PTR_ERR(new_plane_state);
11256 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11257 				goto fail;
11258 			}
11259 		}
11260 	}
11261 
	/* Remove existing planes if they are modified */
11263 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11264 		ret = dm_update_plane_state(dc, state, plane,
11265 					    old_plane_state,
11266 					    new_plane_state,
11267 					    false,
11268 					    &lock_and_validation_needed);
11269 		if (ret) {
11270 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11271 			goto fail;
11272 		}
11273 	}
11274 
11275 	/* Disable all crtcs which require disable */
11276 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11277 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11278 					   old_crtc_state,
11279 					   new_crtc_state,
11280 					   false,
11281 					   &lock_and_validation_needed);
11282 		if (ret) {
11283 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11284 			goto fail;
11285 		}
11286 	}
11287 
11288 	/* Enable all crtcs which require enable */
11289 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11290 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11291 					   old_crtc_state,
11292 					   new_crtc_state,
11293 					   true,
11294 					   &lock_and_validation_needed);
11295 		if (ret) {
11296 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11297 			goto fail;
11298 		}
11299 	}
11300 
11301 	/* Add new/modified planes */
11302 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11303 		ret = dm_update_plane_state(dc, state, plane,
11304 					    old_plane_state,
11305 					    new_plane_state,
11306 					    true,
11307 					    &lock_and_validation_needed);
11308 		if (ret) {
11309 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11310 			goto fail;
11311 		}
11312 	}
11313 
11314 	/* Run this here since we want to validate the streams we created */
11315 	ret = drm_atomic_helper_check_planes(dev, state);
11316 	if (ret) {
11317 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11318 		goto fail;
11319 	}
11320 
11321 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11322 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11323 		if (dm_new_crtc_state->mpo_requested)
11324 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11325 	}
11326 
11327 	/* Check cursor planes scaling */
11328 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11329 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11330 		if (ret) {
11331 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11332 			goto fail;
11333 		}
11334 	}
11335 
11336 	if (state->legacy_cursor_update) {
11337 		/*
11338 		 * This is a fast cursor update coming from the plane update
11339 		 * helper, check if it can be done asynchronously for better
11340 		 * performance.
11341 		 */
11342 		state->async_update =
11343 			!drm_atomic_helper_async_check(dev, state);
11344 
11345 		/*
11346 		 * Skip the remaining global validation if this is an async
11347 		 * update. Cursor updates can be done without affecting
11348 		 * state or bandwidth calcs and this avoids the performance
11349 		 * penalty of locking the private state object and
11350 		 * allocating a new dc_state.
11351 		 */
11352 		if (state->async_update)
11353 			return 0;
11354 	}
11355 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context without causing a full reset. Need to
	 * decide how to handle this.
	 */
11361 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11362 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11363 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11364 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11365 
11366 		/* Skip any modesets/resets */
11367 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11368 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11369 			continue;
11370 
		/* Skip anything that is not a scaling or underscan change */
11372 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11373 			continue;
11374 
11375 		lock_and_validation_needed = true;
11376 	}
11377 
11378 #if defined(CONFIG_DRM_AMD_DC_DCN)
11379 	/* set the slot info for each mst_state based on the link encoding format */
11380 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11381 		struct amdgpu_dm_connector *aconnector;
11382 		struct drm_connector *connector;
11383 		struct drm_connector_list_iter iter;
11384 		u8 link_coding_cap;
11385 
		if (!mgr->mst_state)
11387 			continue;
11388 
11389 		drm_connector_list_iter_begin(dev, &iter);
11390 		drm_for_each_connector_iter(connector, &iter) {
11391 			int id = connector->index;
11392 
11393 			if (id == mst_state->mgr->conn_base_id) {
11394 				aconnector = to_amdgpu_dm_connector(connector);
11395 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11396 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11397 
11398 				break;
11399 			}
11400 		}
		drm_connector_list_iter_end(&iter);
	}
11404 #endif
11405 	/**
11406 	 * Streams and planes are reset when there are changes that affect
11407 	 * bandwidth. Anything that affects bandwidth needs to go through
11408 	 * DC global validation to ensure that the configuration can be applied
11409 	 * to hardware.
11410 	 *
11411 	 * We have to currently stall out here in atomic_check for outstanding
11412 	 * commits to finish in this case because our IRQ handlers reference
11413 	 * DRM state directly - we can end up disabling interrupts too early
11414 	 * if we don't.
11415 	 *
11416 	 * TODO: Remove this stall and drop DM state private objects.
11417 	 */
11418 	if (lock_and_validation_needed) {
11419 		ret = dm_atomic_get_state(state, &dm_state);
11420 		if (ret) {
11421 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11422 			goto fail;
11423 		}
11424 
11425 		ret = do_aquire_global_lock(dev, state);
11426 		if (ret) {
11427 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11428 			goto fail;
11429 		}
11430 
11431 #if defined(CONFIG_DRM_AMD_DC_DCN)
11432 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11433 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11434 			ret = -EINVAL;
11435 			goto fail;
11436 		}
11437 
11438 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11439 		if (ret) {
11440 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11441 			goto fail;
11442 		}
11443 #endif
11444 
11445 		/*
11446 		 * Perform validation of MST topology in the state:
11447 		 * We need to perform MST atomic check before calling
11448 		 * dc_validate_global_state(), or there is a chance
11449 		 * to get stuck in an infinite loop and hang eventually.
11450 		 */
11451 		ret = drm_dp_mst_atomic_check(state);
11452 		if (ret) {
11453 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11454 			goto fail;
11455 		}
11456 		status = dc_validate_global_state(dc, dm_state->context, true);
11457 		if (status != DC_OK) {
11458 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11459 				       dc_status_to_str(status), status);
11460 			ret = -EINVAL;
11461 			goto fail;
11462 		}
11463 	} else {
11464 		/*
11465 		 * The commit is a fast update. Fast updates shouldn't change
11466 		 * the DC context, affect global validation, and can have their
11467 		 * commit work done in parallel with other commits not touching
11468 		 * the same resource. If we have a new DC context as part of
11469 		 * the DM atomic state from validation we need to free it and
11470 		 * retain the existing one instead.
11471 		 *
11472 		 * Furthermore, since the DM atomic state only contains the DC
11473 		 * context and can safely be annulled, we can free the state
11474 		 * and clear the associated private object now to free
11475 		 * some memory and avoid a possible use-after-free later.
11476 		 */
11477 
11478 		for (i = 0; i < state->num_private_objs; i++) {
11479 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11480 
11481 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11482 				int j = state->num_private_objs-1;
11483 
11484 				dm_atomic_destroy_state(obj,
11485 						state->private_objs[i].state);
11486 
11487 				/* If i is not at the end of the array then the
11488 				 * last element needs to be moved to where i was
11489 				 * before the array can safely be truncated.
11490 				 */
11491 				if (i != j)
11492 					state->private_objs[i] =
11493 						state->private_objs[j];
11494 
11495 				state->private_objs[j].ptr = NULL;
11496 				state->private_objs[j].state = NULL;
11497 				state->private_objs[j].old_state = NULL;
11498 				state->private_objs[j].new_state = NULL;
11499 
11500 				state->num_private_objs = j;
11501 				break;
11502 			}
11503 		}
11504 	}
11505 
11506 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11508 		struct dm_crtc_state *dm_new_crtc_state =
11509 			to_dm_crtc_state(new_crtc_state);
11510 
11511 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11512 							 UPDATE_TYPE_FULL :
11513 							 UPDATE_TYPE_FAST;
11514 	}
11515 
11516 	/* Must be success */
11517 	WARN_ON(ret);
11518 
11519 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11520 
11521 	return ret;
11522 
11523 fail:
11524 	if (ret == -EDEADLK)
11525 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11526 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11527 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11528 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11530 
11531 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11532 
11533 	return ret;
11534 }
11535 
11536 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11537 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11538 {
11539 	uint8_t dpcd_data;
11540 	bool capable = false;
11541 
11542 	if (amdgpu_dm_connector->dc_link &&
11543 		dm_helpers_dp_read_dpcd(
11544 				NULL,
11545 				amdgpu_dm_connector->dc_link,
11546 				DP_DOWN_STREAM_PORT_COUNT,
11547 				&dpcd_data,
11548 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11550 	}
11551 
11552 	return capable;
11553 }
11554 
11555 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11556 		unsigned int offset,
11557 		unsigned int total_length,
11558 		uint8_t *data,
11559 		unsigned int length,
11560 		struct amdgpu_hdmi_vsdb_info *vsdb)
11561 {
11562 	bool res;
11563 	union dmub_rb_cmd cmd;
11564 	struct dmub_cmd_send_edid_cea *input;
11565 	struct dmub_cmd_edid_cea_output *output;
11566 
11567 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11568 		return false;
11569 
11570 	memset(&cmd, 0, sizeof(cmd));
11571 
11572 	input = &cmd.edid_cea.data.input;
11573 
11574 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11575 	cmd.edid_cea.header.sub_type = 0;
11576 	cmd.edid_cea.header.payload_bytes =
11577 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11578 	input->offset = offset;
11579 	input->length = length;
11580 	input->cea_total_length = total_length;
11581 	memcpy(input->payload, data, length);
11582 
11583 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11584 	if (!res) {
11585 		DRM_ERROR("EDID CEA parser failed\n");
11586 		return false;
11587 	}
11588 
11589 	output = &cmd.edid_cea.data.output;
11590 
11591 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11592 		if (!output->ack.success) {
11593 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11594 					output->ack.offset);
11595 		}
11596 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11597 		if (!output->amd_vsdb.vsdb_found)
11598 			return false;
11599 
11600 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11601 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11602 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11603 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11604 	} else {
11605 		DRM_WARN("Unknown EDID CEA parser results\n");
11606 		return false;
11607 	}
11608 
11609 	return true;
11610 }
11611 
11612 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11613 		uint8_t *edid_ext, int len,
11614 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11615 {
11616 	int i;
11617 
11618 	/* send extension block to DMCU for parsing */
11619 	for (i = 0; i < len; i += 8) {
11620 		bool res;
11621 		int offset;
11622 
		/* send 8 bytes at a time */
11624 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11625 			return false;
11626 
		if (i + 8 == len) {
			/* entire EDID block sent, expect the parse result */
11629 			int version, min_rate, max_rate;
11630 
11631 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11632 			if (res) {
11633 				/* amd vsdb found */
11634 				vsdb_info->freesync_supported = 1;
11635 				vsdb_info->amd_vsdb_version = version;
11636 				vsdb_info->min_refresh_rate_hz = min_rate;
11637 				vsdb_info->max_refresh_rate_hz = max_rate;
11638 				return true;
11639 			}
11640 			/* not amd vsdb */
11641 			return false;
11642 		}
11643 
		/* check for ack */
11645 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11646 		if (!res)
11647 			return false;
11648 	}
11649 
11650 	return false;
11651 }
11652 
11653 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11654 		uint8_t *edid_ext, int len,
11655 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11656 {
11657 	int i;
11658 
	/* send extension block to DMUB for parsing */
11660 	for (i = 0; i < len; i += 8) {
11661 		/* send 8 bytes a time */
11662 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11663 			return false;
11664 	}
11665 
11666 	return vsdb_info->freesync_supported;
11667 }
11668 
11669 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11670 		uint8_t *edid_ext, int len,
11671 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11672 {
11673 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11674 
11675 	if (adev->dm.dmub_srv)
11676 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11677 	else
11678 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11679 }
11680 
11681 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11682 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11683 {
11684 	uint8_t *edid_ext = NULL;
11685 	int i;
11686 	bool valid_vsdb_found = false;
11687 
11688 	/*----- drm_find_cea_extension() -----*/
11689 	/* No EDID or EDID extensions */
11690 	if (edid == NULL || edid->extensions == 0)
11691 		return -ENODEV;
11692 
11693 	/* Find CEA extension */
11694 	for (i = 0; i < edid->extensions; i++) {
11695 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11696 		if (edid_ext[0] == CEA_EXT)
11697 			break;
11698 	}
11699 
11700 	if (i == edid->extensions)
11701 		return -ENODEV;
11702 
11703 	/*----- cea_db_offsets() -----*/
11704 	if (edid_ext[0] != CEA_EXT)
11705 		return -ENODEV;
11706 
11707 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11708 
11709 	return valid_vsdb_found ? i : -ENODEV;
11710 }
11711 
11712 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11713 					struct edid *edid)
11714 {
11715 	int i = 0;
11716 	struct detailed_timing *timing;
11717 	struct detailed_non_pixel *data;
11718 	struct detailed_data_monitor_range *range;
11719 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11720 			to_amdgpu_dm_connector(connector);
11721 	struct dm_connector_state *dm_con_state = NULL;
11722 	struct dc_sink *sink;
11723 
11724 	struct drm_device *dev = connector->dev;
11725 	struct amdgpu_device *adev = drm_to_adev(dev);
11726 	bool freesync_capable = false;
11727 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11728 
11729 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
11731 		goto update;
11732 	}
11733 
11734 	sink = amdgpu_dm_connector->dc_sink ?
11735 		amdgpu_dm_connector->dc_sink :
11736 		amdgpu_dm_connector->dc_em_sink;
11737 
11738 	if (!edid || !sink) {
11739 		dm_con_state = to_dm_connector_state(connector->state);
11740 
11741 		amdgpu_dm_connector->min_vfreq = 0;
11742 		amdgpu_dm_connector->max_vfreq = 0;
11743 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11744 		connector->display_info.monitor_range.min_vfreq = 0;
11745 		connector->display_info.monitor_range.max_vfreq = 0;
11746 		freesync_capable = false;
11747 
11748 		goto update;
11749 	}
11750 
11751 	dm_con_state = to_dm_connector_state(connector->state);
11752 
11753 	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11758 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11759 		bool edid_check_required = false;
11760 
11761 		if (edid) {
11762 			edid_check_required = is_dp_capable_without_timing_msa(
11763 						adev->dm.dc,
11764 						amdgpu_dm_connector);
11765 		}
11766 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
11771 				timing	= &edid->detailed_timings[i];
11772 				data	= &timing->data.other_data;
11773 				range	= &data->data.range;
11774 				/*
11775 				 * Check if monitor has continuous frequency mode
11776 				 */
11777 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11778 					continue;
11779 				/*
11780 				 * Check for flag range limits only. If flag == 1 then
11781 				 * no additional timing information provided.
11782 				 * Default GTF, GTF Secondary curve and CVT are not
11783 				 * supported
11784 				 */
11785 				if (range->flags != 1)
11786 					continue;
11787 
11788 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11789 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11790 				amdgpu_dm_connector->pixel_clock_mhz =
11791 					range->pixel_clock_mhz * 10;
11792 
11793 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11794 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11795 
11796 				break;
11797 			}
11798 
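			/*
			 * Consider the panel FreeSync-capable only if its
			 * supported refresh range is wider than 10 Hz.
			 */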
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
11804 		}
11805 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
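		/*
		 * FreeSync over HDMI: look for the AMD vendor-specific data
		 * block in the EDID's CEA extension.
		 */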
11806 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11807 		if (i >= 0 && vsdb_info.freesync_supported) {
11808 			timing  = &edid->detailed_timings[i];
11809 			data    = &timing->data.other_data;
11810 
11811 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11812 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11813 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11814 				freesync_capable = true;
11815 
11816 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11817 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11818 		}
11819 	}
11820 
11821 update:
11822 	if (dm_con_state)
11823 		dm_con_state->freesync_capable = freesync_capable;
11824 
11825 	if (connector->vrr_capable_property)
11826 		drm_connector_set_vrr_capable_property(connector,
11827 						       freesync_capable);
11828 }
11829 
11830 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11831 {
11832 	struct amdgpu_device *adev = drm_to_adev(dev);
11833 	struct dc *dc = adev->dm.dc;
11834 	int i;
11835 
11836 	mutex_lock(&adev->dm.dc_lock);
11837 	if (dc->current_state) {
11838 		for (i = 0; i < dc->current_state->stream_count; ++i)
11839 			dc->current_state->streams[i]
11840 				->triggered_crtc_reset.enabled =
11841 				adev->dm.force_timing_sync;
11842 
11843 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11844 		dc_trigger_sync(dc, dc->current_state);
11845 	}
11846 	mutex_unlock(&adev->dm.dc_lock);
11847 }
11848 
11849 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11850 		       uint32_t value, const char *func_name)
11851 {
11852 #ifdef DM_CHECK_ADDR_0
11853 	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
11855 		return;
11856 	}
11857 #endif
11858 	cgs_write_register(ctx->cgs_device, address, value);
11859 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11860 }
11861 
11862 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11863 			  const char *func_name)
11864 {
11865 	uint32_t value;
11866 #ifdef DM_CHECK_ADDR_0
11867 	if (address == 0) {
11868 		DC_ERR("invalid register read; address = 0\n");
11869 		return 0;
11870 	}
11871 #endif
11872 
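	/*
	 * Reads cannot be serviced while DMUB is gathering a register
	 * sequence for offload (unless burst writes are in use), so treat
	 * this as a programming error.
	 */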
11873 	if (ctx->dmub_srv &&
11874 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11875 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11876 		ASSERT(false);
11877 		return 0;
11878 	}
11879 
11880 	value = cgs_read_register(ctx->cgs_device, address);
11881 
11882 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11883 
11884 	return value;
11885 }
11886 
11887 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11888 						struct dc_context *ctx,
11889 						uint8_t status_type,
11890 						uint32_t *operation_result)
11891 {
11892 	struct amdgpu_device *adev = ctx->driver_context;
11893 	int return_status = -1;
11894 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11895 
11896 	if (is_cmd_aux) {
11897 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11898 			return_status = p_notify->aux_reply.length;
11899 			*operation_result = p_notify->result;
11900 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11901 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11902 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11903 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11904 		} else {
11905 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11906 		}
11907 	} else {
11908 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11909 			return_status = 0;
11910 			*operation_result = p_notify->sc_status;
11911 		} else {
11912 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11913 		}
11914 	}
11915 
11916 	return return_status;
11917 }
11918 
11919 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11920 	unsigned int link_index, void *cmd_payload, void *operation_result)
11921 {
11922 	struct amdgpu_device *adev = ctx->driver_context;
11923 	int ret = 0;
11924 
11925 	if (is_cmd_aux) {
11926 		dc_process_dmub_aux_transfer_async(ctx->dc,
11927 			link_index, (struct aux_payload *)cmd_payload);
11928 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11929 					(struct set_config_cmd_payload *)cmd_payload,
11930 					adev->dm.dmub_notify)) {
11931 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11932 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11933 					(uint32_t *)operation_result);
11934 	}
11935 
11936 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11937 	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
11939 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11940 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11941 				(uint32_t *)operation_result);
11942 	}
11943 
11944 	if (is_cmd_aux) {
11945 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11946 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11947 
11948 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11949 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11950 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11951 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11952 				       adev->dm.dmub_notify->aux_reply.length);
11953 			}
11954 		}
11955 	}
11956 
11957 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11958 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11959 			(uint32_t *)operation_result);
11960 }
11961 
11962 /*
11963  * Check whether seamless boot is supported.
11964  *
11965  * So far we only support seamless boot on CHIP_VANGOGH.
11966  * If everything goes well, we may consider expanding
11967  * seamless boot to other ASICs.
11968  */
11969 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11970 {
11971 	switch (adev->asic_type) {
11972 	case CHIP_VANGOGH:
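		/*
		 * Vangogh reports seamless boot only when stolen VGA memory
		 * does not need to be kept reserved.
		 */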
11973 		if (!adev->mman.keep_stolen_vga_memory)
11974 			return true;
11975 		break;
11976 	default:
11977 		break;
11978 	}
11979 
11980 	return false;
11981 }
11982