// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fourcc.h>

#include "amdgpu.h"
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * DRM check will succeed, and let DC implement the proper check.
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XRGB16161616,
	DRM_FORMAT_XBGR16161616,
	DRM_FORMAT_ARGB16161616,
	DRM_FORMAT_ABGR16161616,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

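/*
 * Derive DC blending settings from a DRM plane state: whether the format has
 * per-pixel alpha, whether that alpha is pre-multiplied or coverage, and the
 * plane-global alpha value (0-0xff).
 */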
void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
			       bool *global_alpha, int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*pre_multiplied_alpha = true;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}

		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
			*pre_multiplied_alpha = false;
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

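/*
 * Append a modifier to a dynamically grown array. The array doubles when
 * full; on allocation failure it is freed and *mods set to NULL so the
 * caller can report -ENOMEM once at the end.
 */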
static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

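/* True if an AMD format modifier has DCC (Delta Color Compression) enabled. */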
bool modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	/* Fill GFX9 params */
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2;

	pipes_log2 = min(5u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

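/*
 * Ask DC whether the requested DCC configuration is supported for this
 * format, surface size and swizzle. Returns 0 when DCC is disabled or
 * supported, -EINVAL otherwise.
 */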
static int validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

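/*
 * Translate the framebuffer's format modifier into GFX9+ tiling info and DCC
 * parameters for DC, then validate the resulting DCC setup.
 */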
static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];
		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = independent_64b_blks;
		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
			if (independent_64b_blks && independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
			else if (independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_128b;
			else if (independent_64b_blks && !independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		} else {
			if (independent_64b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		}

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);

	return ret;
}

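/*
 * The add_gfx*_modifiers() helpers below build the per-ASIC modifier lists
 * advertised to userspace, most preferred (DCC-capable) modifiers first.
 */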
static void add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				    AMD_FMT_MOD_SET(RB, rb) |
				    AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			    AMD_FMT_MOD_SET(RB, rb) |
			    AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void add_gfx11_modifiers(struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int num_pipes = 0;
	int pipe_xor_bits = 0;
	int num_pkrs = 0;
	int pkrs = 0;
	u32 gb_addr_config;
	u8 i = 0;
	unsigned swizzle_r_x;
	uint64_t modifier_r_x;
	uint64_t modifier_dcc_best;
	uint64_t modifier_dcc_4k;

	/* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}
	 */
	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
	ASSERT(gb_addr_config != 0);

	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
	pkrs = ilog2(num_pkrs);
	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
	pipe_xor_bits = ilog2(num_pipes);

	for (i = 0; i < 2; i++) {
		/* Insert the best one first. */
		/* R_X swizzle modes are the best for rendering and DCC requires them. */
		if (num_pipes > 16)
			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
		else
			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;

		modifier_r_x = AMD_FMT_MOD |
			       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
			       AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			       AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
			       AMD_FMT_MOD_SET(PACKERS, pkrs);

		/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
		modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);

		/* DCC settings for 4K and greater resolutions. (required by display hw) */
		modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
				  AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);

		add_modifier(mods, size, capacity, modifier_dcc_best);
		add_modifier(mods, size, capacity, modifier_dcc_4k);

		add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
		add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));

		add_modifier(mods, size, capacity, modifier_r_x);
	}

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
}

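/*
 * Build the modifier list advertised to userspace for a plane. Cursor planes
 * only get LINEAR; other plane types get the per-ASIC lists above, always
 * terminated by LINEAR and INVALID.
 */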
static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
	case AMDGPU_FAMILY_YC:
	case AMDGPU_FAMILY_GC_10_3_6:
	case AMDGPU_FAMILY_GC_10_3_7:
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_GC_11_0_0:
	case AMDGPU_FAMILY_GC_11_0_2:
		add_gfx11_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}

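/*
 * Fill @formats with the DRM formats supported by the given plane type,
 * extending the primary-plane list with NV12/P010/FP16 when the DC plane
 * caps report support. Returns the number of formats written.
 */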
static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

#ifdef CONFIG_DRM_AMD_DC_HDR
static int attach_color_mgmt_properties(struct amdgpu_display_manager *dm, struct drm_plane *plane)
{
	drm_object_attach_property(&plane->base,
				   dm->degamma_lut_property,
				   0);
	drm_object_attach_property(&plane->base,
				   dm->degamma_lut_size_property,
				   MAX_COLOR_LUT_ENTRIES);
	drm_object_attach_property(&plane->base, dm->ctm_property,
				   0);
	drm_object_attach_property(&plane->base, dm->sdr_boost_property,
				   DEFAULT_SDR_BOOST);

	return 0;
}
#endif

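/*
 * Fill DC surface size, tiling, DCC and address information for a
 * framebuffer, handling both packed RGB and two-plane YUV layouts.
 */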
int fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

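/*
 * Pin the framebuffer BO in a displayable domain, map it into GART and
 * record its GPU address; for newly created planes the DC plane state is
 * also refreshed with the buffer attributes.
 */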
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);

	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
	if (r) {
		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
		goto error_unlock;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		goto error_unlock;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", rbo);
		goto error_unpin;
	}

	r = drm_gem_plane_helper_prepare_fb(plane, new_state);
	if (unlikely(r != 0))
		goto error_unpin;

	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(rbo);

error_unlock:
	amdgpu_bo_unreserve(rbo);
	return r;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

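/*
 * Look up the min downscale / max upscale factors (in units of 1/1000) that
 * the DC plane caps allow for the framebuffer's pixel format.
 */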
static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

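/*
 * Validate a plane state's viewport against the CRTC mode and check the
 * requested scaling against the DC plane caps via the DRM atomic helper.
 */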
int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	struct drm_framebuffer *fb = state->fb;
	int min_downscale, max_upscale;
	int min_scale = 0;
	int max_scale = INT_MAX;

	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
	if (fb && state->crtc) {
		/* Validate viewport to cover the case when only the position changes */
		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
			int viewport_width = state->crtc_w;
			int viewport_height = state->crtc_h;

			if (state->crtc_x < 0)
				viewport_width += state->crtc_x;
			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

			if (state->crtc_y < 0)
				viewport_height += state->crtc_y;
			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

			if (viewport_width < 0 || viewport_height < 0) {
				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
				return -EINVAL;
			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
				return -EINVAL;
			}
		}

		/* Get min/max allowed scaling factors from plane caps. */
		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
					     &min_downscale, &max_upscale);
		/*
		 * Convert to drm convention: 16.16 fixed point, instead of dc's
		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
		 */
		min_scale = (1000 << 16) / max_upscale;
		max_scale = (1000 << 16) / min_downscale;
	}

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, min_scale, max_scale, true, true);
}

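/*
 * Convert the DRM plane src/dst rectangles (16.16 fixed-point source) into a
 * DC scaling_info and reject scaling ratios outside the plane caps.
 */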
int fill_dc_scaling_info(struct amdgpu_device *adev,
				const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_plane_atomic_check(new_plane_state);

	dm_plane_state = to_dm_plane_state(new_plane_state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state,
					      new_plane_state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_atomic_state *state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

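/*
 * Compute the DC cursor position from the plane state, clamping negative
 * coordinates into a hotspot offset so the cursor can partially leave the
 * top/left edge of the screen.
 */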
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

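/*
 * Program the DC cursor attributes and position for the current plane state,
 * or disable the cursor when it has moved fully off screen or lost its FB.
 */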
void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
		      __func__,
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
#ifdef CONFIG_DRM_AMD_DC_HDR
	if (amdgpu_state)
		amdgpu_state->sdr_boost = DEFAULT_SDR_BOOST;
#endif
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

#ifdef CONFIG_DRM_AMD_DC_HDR
	if (dm_plane_state->degamma_lut)
		drm_property_blob_get(dm_plane_state->degamma_lut);
	if (dm_plane_state->ctm)
		drm_property_blob_get(dm_plane_state->ctm);

	dm_plane_state->sdr_boost = old_dm_plane_state->sdr_boost;
#endif

	return &dm_plane_state->base;
}

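/*
 * DRM .format_mod_supported hook: accept LINEAR/INVALID unconditionally and
 * otherwise check the swizzle mode, micro-tile type and DCC constraints
 * against the ASIC family.
 */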
static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	struct hw_asic_id asic_id = adev->dm.dc->ctx->asic_id;

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* check if swizzle mode is supported by this version of DCN */
	switch (asic_id.chip_family) {
	case FAMILY_SI:
	case FAMILY_CI:
	case FAMILY_KV:
	case FAMILY_CZ:
	case FAMILY_VI:
		/* ASICs before AI do not have modifier support */
		return false;
	case FAMILY_AI:
	case FAMILY_RV:
	case FAMILY_NV:
	case FAMILY_VGH:
	case FAMILY_YELLOW_CARP:
	case AMDGPU_FAMILY_GC_10_3_6:
	case AMDGPU_FAMILY_GC_10_3_7:
		switch (AMD_FMT_MOD_GET(TILE, modifier)) {
		case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
		case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
		case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
		case AMD_FMT_MOD_TILE_GFX9_64K_D:
			return true;
		default:
			return false;
		}
		break;
	case AMDGPU_FAMILY_GC_11_0_0:
	case AMDGPU_FAMILY_GC_11_0_2:
		switch (AMD_FMT_MOD_GET(TILE, modifier)) {
		case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
		case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
		case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
		case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
		case AMD_FMT_MOD_TILE_GFX9_64K_D:
			return true;
		default:
			return false;
		}
		break;
	default:
		ASSERT(0); /* Unknown asic */
		break;
	}

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/* We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}

1459 
1460 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
1461 				struct drm_plane_state *state)
1462 {
1463 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1464 
1465 #ifdef CONFIG_DRM_AMD_DC_HDR
1466 	drm_property_blob_put(dm_plane_state->degamma_lut);
1467 	drm_property_blob_put(dm_plane_state->ctm);
1468 #endif
1469 	if (dm_plane_state->dc_state)
1470 		dc_plane_state_release(dm_plane_state->dc_state);
1471 
1472 	drm_atomic_helper_plane_destroy_state(plane, state);
1473 }
1474 
1475 #ifdef CONFIG_DRM_AMD_DC_HDR
1476 /* copied from drm_atomic_uapi.c */
1477 static int atomic_replace_property_blob_from_id(struct drm_device *dev,
1478 					 struct drm_property_blob **blob,
1479 					 uint64_t blob_id,
1480 					 ssize_t expected_size,
1481 					 ssize_t expected_elem_size,
1482 					 bool *replaced)
1483 {
1484 	struct drm_property_blob *new_blob = NULL;
1485 
1486 	if (blob_id != 0) {
1487 		new_blob = drm_property_lookup_blob(dev, blob_id);
1488 		if (new_blob == NULL)
1489 			return -EINVAL;
1490 
1491 		if (expected_size > 0 &&
1492 		    new_blob->length != expected_size) {
1493 			drm_property_blob_put(new_blob);
1494 			return -EINVAL;
1495 		}
1496 		if (expected_elem_size > 0 &&
1497 		    new_blob->length % expected_elem_size != 0) {
1498 			drm_property_blob_put(new_blob);
1499 			return -EINVAL;
1500 		}
1501 	}
1502 
1503 	*replaced |= drm_property_replace_blob(blob, new_blob);
1504 	drm_property_blob_put(new_blob);
1505 
1506 	return 0;
1507 }
1508 
1509 int dm_drm_plane_set_property(struct drm_plane *plane,
1510 			      struct drm_plane_state *state,
1511 			      struct drm_property *property,
1512 			      uint64_t val)
1513 {
1514 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1515 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1516 	int ret = 0;
1517 	bool replaced;
1518 
1519 	if (property == adev->dm.degamma_lut_property) {
1520 		ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
1521 				&dm_plane_state->degamma_lut,
1522 				val, -1, sizeof(struct drm_color_lut),
1523 				&replaced);
1524 	} else if (property == adev->dm.ctm_property) {
1525 		ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
1526 				&dm_plane_state->ctm,
1527 				val,
1528 				sizeof(struct drm_color_ctm), -1,
1529 				&replaced);
1530 	} else if (property == adev->dm.sdr_boost_property) {
1531 		dm_plane_state->sdr_boost = val;
1532 	} else {
1533 		return -EINVAL;
1534 	}
1535 
1536 	return ret;
1537 }
1538 
1539 int dm_drm_plane_get_property(struct drm_plane *plane,
1540 			      const struct drm_plane_state *state,
1541 			      struct drm_property *property,
1542 			      uint64_t *val)
1543 {
1544 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1545 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1546 
1547 	if (property == adev->dm.degamma_lut_property) {
1548 		*val = (dm_plane_state->degamma_lut) ?
1549 			dm_plane_state->degamma_lut->base.id : 0;
1550 	} else if (property == adev->dm.ctm_property) {
1551 		*val = (dm_plane_state->ctm) ? dm_plane_state->ctm->base.id : 0;
1552 	} else if (property == adev->dm.sdr_boost_property) {
1553 		*val = dm_plane_state->sdr_boost;
1554 	} else {
1555 		return -EINVAL;
1556 	}
1557 
1558 	return 0;
1559 }
1560 #endif
1561 
1562 static const struct drm_plane_funcs dm_plane_funcs = {
1563 	.update_plane	= drm_atomic_helper_update_plane,
1564 	.disable_plane	= drm_atomic_helper_disable_plane,
1565 	.destroy	= drm_primary_helper_destroy,
1566 	.reset = dm_drm_plane_reset,
1567 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
1568 	.atomic_destroy_state = dm_drm_plane_destroy_state,
1569 	.format_mod_supported = dm_plane_format_mod_supported,
1570 #ifdef CONFIG_DRM_AMD_DC_HDR
1571 	.atomic_set_property = dm_drm_plane_set_property,
1572 	.atomic_get_property = dm_drm_plane_get_property,
1573 #endif
1574 };
1575 
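/*
 * Create and initialize a DRM plane for the display manager: register the
 * supported formats and modifiers, then attach blending, color-space and
 * rotation properties where the DC plane caps allow them.
 */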
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI) |
					  BIT(DRM_MODE_BLEND_COVERAGE);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

#ifdef CONFIG_DRM_AMD_DC_HDR
	attach_color_mgmt_properties(dm, plane);
#endif
	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}