1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright 2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: AMD
24 *
25 */
26
27 #include <drm/drm_atomic_helper.h>
28 #include <drm/drm_blend.h>
29 #include <drm/drm_gem_atomic_helper.h>
30 #include <drm/drm_plane_helper.h>
31 #include <drm/drm_gem_framebuffer_helper.h>
32 #include <drm/drm_fourcc.h>
33
34 #include "amdgpu.h"
35 #include "dal_asic_id.h"
36 #include "amdgpu_display.h"
37 #include "amdgpu_dm_trace.h"
38 #include "amdgpu_dm_plane.h"
39 #include "gc/gc_11_0_0_offset.h"
40 #include "gc/gc_11_0_0_sh_mask.h"
41
42 /*
43 * TODO: these are currently initialized to RGB formats only.
44 * For future use cases we should either initialize them dynamically based on
45 * plane capabilities, or initialize this array to all formats, so the internal
46 * DRM check will succeed, and let DC implement the proper check.
47 */
48 static const uint32_t rgb_formats[] = {
49 DRM_FORMAT_XRGB8888,
50 DRM_FORMAT_ARGB8888,
51 DRM_FORMAT_RGBA8888,
52 DRM_FORMAT_XRGB2101010,
53 DRM_FORMAT_XBGR2101010,
54 DRM_FORMAT_ARGB2101010,
55 DRM_FORMAT_ABGR2101010,
56 DRM_FORMAT_XRGB16161616,
57 DRM_FORMAT_XBGR16161616,
58 DRM_FORMAT_ARGB16161616,
59 DRM_FORMAT_ABGR16161616,
60 DRM_FORMAT_XBGR8888,
61 DRM_FORMAT_ABGR8888,
62 DRM_FORMAT_RGB565,
63 };
64
65 static const uint32_t overlay_formats[] = {
66 DRM_FORMAT_XRGB8888,
67 DRM_FORMAT_ARGB8888,
68 DRM_FORMAT_RGBA8888,
69 DRM_FORMAT_XBGR8888,
70 DRM_FORMAT_ABGR8888,
71 DRM_FORMAT_RGB565,
72 DRM_FORMAT_NV21,
73 DRM_FORMAT_NV12,
74 DRM_FORMAT_P010
75 };
76
77 static const uint32_t video_formats[] = {
78 DRM_FORMAT_NV21,
79 DRM_FORMAT_NV12,
80 DRM_FORMAT_P010
81 };
82
83 static const u32 cursor_formats[] = {
84 DRM_FORMAT_ARGB8888
85 };
86
87 enum dm_micro_swizzle {
88 MICRO_SWIZZLE_Z = 0,
89 MICRO_SWIZZLE_S = 1,
90 MICRO_SWIZZLE_D = 2,
91 MICRO_SWIZZLE_R = 3
92 };
93
94 const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
95 {
96 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
97 }
98
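/*
 * Derive DC blending parameters from the DRM plane state: whether the
 * framebuffer format carries per-pixel alpha, whether that alpha is
 * premultiplied, and whether a plane-wide (global) alpha factor applies.
 * Only overlay planes are considered for per-pixel alpha.
 */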
99 void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
100 bool *per_pixel_alpha, bool *pre_multiplied_alpha,
101 bool *global_alpha, int *global_alpha_value)
102 {
103 *per_pixel_alpha = false;
104 *pre_multiplied_alpha = true;
105 *global_alpha = false;
106 *global_alpha_value = 0xff;
107
108 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
109 return;
110
111 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
112 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
113 static const uint32_t alpha_formats[] = {
114 DRM_FORMAT_ARGB8888,
115 DRM_FORMAT_RGBA8888,
116 DRM_FORMAT_ABGR8888,
117 DRM_FORMAT_ARGB2101010,
118 DRM_FORMAT_ABGR2101010,
119 DRM_FORMAT_ARGB16161616,
120 DRM_FORMAT_ABGR16161616,
121 DRM_FORMAT_ARGB16161616F,
122 };
123 uint32_t format = plane_state->fb->format->format;
124 unsigned int i;
125
126 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
127 if (format == alpha_formats[i]) {
128 *per_pixel_alpha = true;
129 break;
130 }
131 }
132
133 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
134 *pre_multiplied_alpha = false;
135 }
136
137 if (plane_state->alpha < 0xffff) {
138 *global_alpha = true;
139 *global_alpha_value = plane_state->alpha >> 8;
140 }
141 }
142
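/*
 * Append one modifier to a dynamically grown array, doubling the capacity
 * when full. On allocation failure the list is freed and *mods is set to
 * NULL so the caller can detect the error once at the end.
 */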
143 static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
144 {
145 if (!*mods)
146 return;
147
148 if (*cap - *size < 1) {
149 uint64_t new_cap = *cap * 2;
150 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
151
152 if (!new_mods) {
153 kfree(*mods);
154 *mods = NULL;
155 return;
156 }
157
158 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
159 kfree(*mods);
160 *mods = new_mods;
161 *cap = new_cap;
162 }
163
164 (*mods)[*size] = mod;
165 *size += 1;
166 }
167
168 static bool modifier_has_dcc(uint64_t modifier)
169 {
170 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
171 }
172
173 static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier)
174 {
175 if (modifier == DRM_FORMAT_MOD_LINEAR)
176 return 0;
177
178 return AMD_FMT_MOD_GET(TILE, modifier);
179 }
180
181 static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
182 uint64_t tiling_flags)
183 {
184 /* Fill GFX8 params */
185 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
186 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
187
188 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
189 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
190 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
191 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
192 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
193
194 /* XXX fix me for VI */
195 tiling_info->gfx8.num_banks = num_banks;
196 tiling_info->gfx8.array_mode =
197 DC_ARRAY_2D_TILED_THIN1;
198 tiling_info->gfx8.tile_split = tile_split;
199 tiling_info->gfx8.bank_width = bankw;
200 tiling_info->gfx8.bank_height = bankh;
201 tiling_info->gfx8.tile_aspect = mtaspect;
202 tiling_info->gfx8.tile_mode =
203 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
204 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
205 == DC_ARRAY_1D_TILED_THIN1) {
206 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
207 }
208
209 tiling_info->gfx8.pipe_config =
210 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
211 }
212
213 static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
214 union dc_tiling_info *tiling_info)
215 {
216 /* Fill GFX9 params */
217 tiling_info->gfx9.num_pipes =
218 adev->gfx.config.gb_addr_config_fields.num_pipes;
219 tiling_info->gfx9.num_banks =
220 adev->gfx.config.gb_addr_config_fields.num_banks;
221 tiling_info->gfx9.pipe_interleave =
222 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
223 tiling_info->gfx9.num_shader_engines =
224 adev->gfx.config.gb_addr_config_fields.num_se;
225 tiling_info->gfx9.max_compressed_frags =
226 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
227 tiling_info->gfx9.num_rb_per_se =
228 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
229 tiling_info->gfx9.shaderEnable = 1;
230 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
231 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
232 }
233
234 static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
235 union dc_tiling_info *tiling_info,
236 uint64_t modifier)
237 {
238 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
239 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
240 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
241 unsigned int pipes_log2;
242
243 pipes_log2 = min(5u, mod_pipe_xor_bits);
244
245 fill_gfx9_tiling_info_from_device(adev, tiling_info);
246
247 if (!IS_AMD_FMT_MOD(modifier))
248 return;
249
250 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
251 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
252
253 if (adev->family >= AMDGPU_FAMILY_NV) {
254 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
255 } else {
256 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
257
258 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
259 }
260 }
261
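/*
 * Ask DC whether the requested DCC configuration is supported for the given
 * format, rotation, swizzle and surface size. Returns 0 when DCC is disabled
 * or supported, -EINVAL otherwise.
 */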
262 static int validate_dcc(struct amdgpu_device *adev,
263 const enum surface_pixel_format format,
264 const enum dc_rotation_angle rotation,
265 const union dc_tiling_info *tiling_info,
266 const struct dc_plane_dcc_param *dcc,
267 const struct dc_plane_address *address,
268 const struct plane_size *plane_size)
269 {
270 struct dc *dc = adev->dm.dc;
271 struct dc_dcc_surface_param input;
272 struct dc_surface_dcc_cap output;
273
274 memset(&input, 0, sizeof(input));
275 memset(&output, 0, sizeof(output));
276
277 if (!dcc->enable)
278 return 0;
279
280 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
281 !dc->cap_funcs.get_dcc_compression_cap)
282 return -EINVAL;
283
284 input.format = format;
285 input.surface_size.width = plane_size->surface_size.width;
286 input.surface_size.height = plane_size->surface_size.height;
287 input.swizzle_mode = tiling_info->gfx9.swizzle;
288
289 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
290 input.scan = SCAN_DIRECTION_HORIZONTAL;
291 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
292 input.scan = SCAN_DIRECTION_VERTICAL;
293
294 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
295 return -EINVAL;
296
297 if (!output.capable)
298 return -EINVAL;
299
300 if (dcc->independent_64b_blks == 0 &&
301 output.grph.rgb.independent_64b_blks != 0)
302 return -EINVAL;
303
304 return 0;
305 }
306
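/*
 * Translate the framebuffer's format modifier into GFX9+ tiling info and,
 * when the modifier carries DCC, fill the DCC parameters and metadata
 * address, then validate the result against DC's DCC capabilities.
 */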
307 static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
308 const struct amdgpu_framebuffer *afb,
309 const enum surface_pixel_format format,
310 const enum dc_rotation_angle rotation,
311 const struct plane_size *plane_size,
312 union dc_tiling_info *tiling_info,
313 struct dc_plane_dcc_param *dcc,
314 struct dc_plane_address *address,
315 const bool force_disable_dcc)
316 {
317 const uint64_t modifier = afb->base.modifier;
318 int ret = 0;
319
320 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
321 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
322
323 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
324 uint64_t dcc_address = afb->address + afb->base.offsets[1];
325 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
326 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
327
328 dcc->enable = 1;
329 dcc->meta_pitch = afb->base.pitches[1];
330 dcc->independent_64b_blks = independent_64b_blks;
331 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
332 if (independent_64b_blks && independent_128b_blks)
333 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
334 else if (independent_128b_blks)
335 dcc->dcc_ind_blk = hubp_ind_block_128b;
336 else if (independent_64b_blks && !independent_128b_blks)
337 dcc->dcc_ind_blk = hubp_ind_block_64b;
338 else
339 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
340 } else {
341 if (independent_64b_blks)
342 dcc->dcc_ind_blk = hubp_ind_block_64b;
343 else
344 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
345 }
346
347 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
348 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
349 }
350
351 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
352 if (ret)
353 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
354
355 return ret;
356 }
357
358 static void add_gfx10_1_modifiers(const struct amdgpu_device *adev,
359 uint64_t **mods, uint64_t *size, uint64_t *capacity)
360 {
361 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
362
363 add_modifier(mods, size, capacity, AMD_FMT_MOD |
364 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
365 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
366 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
367 AMD_FMT_MOD_SET(DCC, 1) |
368 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
369 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
370 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
371
372 add_modifier(mods, size, capacity, AMD_FMT_MOD |
373 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
374 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
375 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
376 AMD_FMT_MOD_SET(DCC, 1) |
377 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
378 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
379 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
380 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
381
382 add_modifier(mods, size, capacity, AMD_FMT_MOD |
383 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
384 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
385 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
386
387 add_modifier(mods, size, capacity, AMD_FMT_MOD |
388 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
389 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
390 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
391
392
393 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
394 add_modifier(mods, size, capacity, AMD_FMT_MOD |
395 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
396 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
397
398 add_modifier(mods, size, capacity, AMD_FMT_MOD |
399 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
400 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
401 }
402
403 static void add_gfx9_modifiers(const struct amdgpu_device *adev,
404 uint64_t **mods, uint64_t *size, uint64_t *capacity)
405 {
406 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
407 int pipe_xor_bits = min(8, pipes +
408 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
409 int bank_xor_bits = min(8 - pipe_xor_bits,
410 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
411 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
412 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
413
414
415 if (adev->family == AMDGPU_FAMILY_RV) {
416 /* Raven2 and later */
417 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
418
419 /*
420 * No _D DCC swizzles yet because we only allow 32bpp, which
421 * doesn't support _D on DCN
422 */
423
424 if (has_constant_encode) {
425 add_modifier(mods, size, capacity, AMD_FMT_MOD |
426 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
427 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
428 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
429 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
430 AMD_FMT_MOD_SET(DCC, 1) |
431 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
432 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
433 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
434 }
435
436 add_modifier(mods, size, capacity, AMD_FMT_MOD |
437 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
438 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
439 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
440 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
441 AMD_FMT_MOD_SET(DCC, 1) |
442 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
443 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
444 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
445
446 if (has_constant_encode) {
447 add_modifier(mods, size, capacity, AMD_FMT_MOD |
448 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
449 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
450 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
451 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
452 AMD_FMT_MOD_SET(DCC, 1) |
453 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
454 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
455 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
456
457 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
458 AMD_FMT_MOD_SET(RB, rb) |
459 AMD_FMT_MOD_SET(PIPE, pipes));
460 }
461
462 add_modifier(mods, size, capacity, AMD_FMT_MOD |
463 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
464 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
465 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
466 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
467 AMD_FMT_MOD_SET(DCC, 1) |
468 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
469 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
470 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
471 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
472 AMD_FMT_MOD_SET(RB, rb) |
473 AMD_FMT_MOD_SET(PIPE, pipes));
474 }
475
476 /*
477 * Only supported for 64bpp on Raven, will be filtered on format in
478 * dm_plane_format_mod_supported.
479 */
480 add_modifier(mods, size, capacity, AMD_FMT_MOD |
481 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
482 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
483 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
484 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
485
486 if (adev->family == AMDGPU_FAMILY_RV) {
487 add_modifier(mods, size, capacity, AMD_FMT_MOD |
488 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
489 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
490 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
491 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
492 }
493
494 /*
495 * Only supported for 64bpp on Raven, will be filtered on format in
496 * dm_plane_format_mod_supported.
497 */
498 add_modifier(mods, size, capacity, AMD_FMT_MOD |
499 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
500 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
501
502 if (adev->family == AMDGPU_FAMILY_RV) {
503 add_modifier(mods, size, capacity, AMD_FMT_MOD |
504 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
505 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
506 }
507 }
508
509 static void add_gfx10_3_modifiers(const struct amdgpu_device *adev,
510 uint64_t **mods, uint64_t *size, uint64_t *capacity)
511 {
512 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
513 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
514
515 add_modifier(mods, size, capacity, AMD_FMT_MOD |
516 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
517 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
518 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
519 AMD_FMT_MOD_SET(PACKERS, pkrs) |
520 AMD_FMT_MOD_SET(DCC, 1) |
521 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
522 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
523 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
524 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
525
526 add_modifier(mods, size, capacity, AMD_FMT_MOD |
527 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
528 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
529 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
530 AMD_FMT_MOD_SET(PACKERS, pkrs) |
531 AMD_FMT_MOD_SET(DCC, 1) |
532 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
533 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
534 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
535
536 add_modifier(mods, size, capacity, AMD_FMT_MOD |
537 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
538 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
539 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
540 AMD_FMT_MOD_SET(PACKERS, pkrs) |
541 AMD_FMT_MOD_SET(DCC, 1) |
542 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
543 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
544 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
545 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
546 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
547
548 add_modifier(mods, size, capacity, AMD_FMT_MOD |
549 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
550 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
551 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
552 AMD_FMT_MOD_SET(PACKERS, pkrs) |
553 AMD_FMT_MOD_SET(DCC, 1) |
554 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
555 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
556 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
557 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
558
559 add_modifier(mods, size, capacity, AMD_FMT_MOD |
560 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
561 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
562 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
563 AMD_FMT_MOD_SET(PACKERS, pkrs));
564
565 add_modifier(mods, size, capacity, AMD_FMT_MOD |
566 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
567 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
568 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
569 AMD_FMT_MOD_SET(PACKERS, pkrs));
570
571 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
572 add_modifier(mods, size, capacity, AMD_FMT_MOD |
573 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
574 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
575
576 add_modifier(mods, size, capacity, AMD_FMT_MOD |
577 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
578 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
579 }
580
581 static void add_gfx11_modifiers(struct amdgpu_device *adev,
582 uint64_t **mods, uint64_t *size, uint64_t *capacity)
583 {
584 int num_pipes = 0;
585 int pipe_xor_bits = 0;
586 int num_pkrs = 0;
587 int pkrs = 0;
588 u32 gb_addr_config;
589 u8 i = 0;
590 unsigned int swizzle_r_x;
591 uint64_t modifier_r_x;
592 uint64_t modifier_dcc_best;
593 uint64_t modifier_dcc_4k;
594
595 /* TODO: GFX11 IP HW init hasn't finished yet, so we read zero from
596 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}.
597 */
598 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
599 ASSERT(gb_addr_config != 0);
600
601 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
602 pkrs = ilog2(num_pkrs);
603 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
604 pipe_xor_bits = ilog2(num_pipes);
605
606 for (i = 0; i < 2; i++) {
607 /* Insert the best one first. */
608 /* R_X swizzle modes are the best for rendering and DCC requires them. */
609 if (num_pipes > 16)
610 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
611 else
612 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
613
614 modifier_r_x = AMD_FMT_MOD |
615 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
616 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
617 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
618 AMD_FMT_MOD_SET(PACKERS, pkrs);
619
620 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
621 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
622 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
623 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
624 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
625
626 /* DCC settings for 4K and greater resolutions. (required by display hw) */
627 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
628 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
629 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
630 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
631
632 add_modifier(mods, size, capacity, modifier_dcc_best);
633 add_modifier(mods, size, capacity, modifier_dcc_4k);
634
635 add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
636 add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
637
638 add_modifier(mods, size, capacity, modifier_r_x);
639 }
640
641 add_modifier(mods, size, capacity, AMD_FMT_MOD |
642 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
643 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
644 }
645
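/*
 * Build the modifier list advertised for a plane. Pre-GFX9 families get no
 * list (modifiers are not hooked up), cursor planes only advertise LINEAR,
 * and other plane types get the per-family GFX9/GFX10/GFX11 sets. The list
 * is terminated with DRM_FORMAT_MOD_INVALID.
 */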
646 static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
647 {
648 uint64_t size = 0, capacity = 128;
649 *mods = NULL;
650
651 /* We have not hooked up any pre-GFX9 modifiers. */
652 if (adev->family < AMDGPU_FAMILY_AI)
653 return 0;
654
655 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
656
657 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
658 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
659 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
660 return *mods ? 0 : -ENOMEM;
661 }
662
663 switch (adev->family) {
664 case AMDGPU_FAMILY_AI:
665 case AMDGPU_FAMILY_RV:
666 add_gfx9_modifiers(adev, mods, &size, &capacity);
667 break;
668 case AMDGPU_FAMILY_NV:
669 case AMDGPU_FAMILY_VGH:
670 case AMDGPU_FAMILY_YC:
671 case AMDGPU_FAMILY_GC_10_3_6:
672 case AMDGPU_FAMILY_GC_10_3_7:
673 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
674 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
675 else
676 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
677 break;
678 case AMDGPU_FAMILY_GC_11_0_0:
679 case AMDGPU_FAMILY_GC_11_0_1:
680 add_gfx11_modifiers(adev, mods, &size, &capacity);
681 break;
682 }
683
684 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
685
686 /* INVALID marks the end of the list. */
687 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
688
689 if (!*mods)
690 return -ENOMEM;
691
692 return 0;
693 }
694
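/*
 * Fill @formats with the pixel formats supported by this plane type,
 * extending primary/universal planes with NV12, P010 and FP16 formats when
 * the DC plane caps allow them. Returns the number of formats written.
 */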
695 static int get_plane_formats(const struct drm_plane *plane,
696 const struct dc_plane_cap *plane_cap,
697 uint32_t *formats, int max_formats)
698 {
699 int i, num_formats = 0;
700
701 /*
702 * TODO: Query support for each group of formats directly from
703 * DC plane caps. This will require adding more formats to the
704 * caps list.
705 */
706
707 if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
708 (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
709 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
710 if (num_formats >= max_formats)
711 break;
712
713 formats[num_formats++] = rgb_formats[i];
714 }
715
716 if (plane_cap && plane_cap->pixel_format_support.nv12)
717 formats[num_formats++] = DRM_FORMAT_NV12;
718 if (plane_cap && plane_cap->pixel_format_support.p010)
719 formats[num_formats++] = DRM_FORMAT_P010;
720 if (plane_cap && plane_cap->pixel_format_support.fp16) {
721 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
722 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
723 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
724 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
725 }
726 } else {
727 switch (plane->type) {
728 case DRM_PLANE_TYPE_OVERLAY:
729 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
730 if (num_formats >= max_formats)
731 break;
732
733 formats[num_formats++] = overlay_formats[i];
734 }
735 break;
736
737 case DRM_PLANE_TYPE_CURSOR:
738 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
739 if (num_formats >= max_formats)
740 break;
741
742 formats[num_formats++] = cursor_formats[i];
743 }
744 break;
745
746 default:
747 break;
748 }
749 }
750
751 return num_formats;
752 }
753
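/*
 * Convert the framebuffer layout (size, pitches, offsets, legacy tiling
 * flags or format modifier) into the DC plane buffer description: tiling
 * info, plane size, DCC parameters and GPU addresses for graphics or video
 * surfaces.
 */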
754 int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
755 const struct amdgpu_framebuffer *afb,
756 const enum surface_pixel_format format,
757 const enum dc_rotation_angle rotation,
758 const uint64_t tiling_flags,
759 union dc_tiling_info *tiling_info,
760 struct plane_size *plane_size,
761 struct dc_plane_dcc_param *dcc,
762 struct dc_plane_address *address,
763 bool tmz_surface,
764 bool force_disable_dcc)
765 {
766 const struct drm_framebuffer *fb = &afb->base;
767 int ret;
768
769 memset(tiling_info, 0, sizeof(*tiling_info));
770 memset(plane_size, 0, sizeof(*plane_size));
771 memset(dcc, 0, sizeof(*dcc));
772 memset(address, 0, sizeof(*address));
773
774 address->tmz_surface = tmz_surface;
775
776 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
777 uint64_t addr = afb->address + fb->offsets[0];
778
779 plane_size->surface_size.x = 0;
780 plane_size->surface_size.y = 0;
781 plane_size->surface_size.width = fb->width;
782 plane_size->surface_size.height = fb->height;
783 plane_size->surface_pitch =
784 fb->pitches[0] / fb->format->cpp[0];
785
786 address->type = PLN_ADDR_TYPE_GRAPHICS;
787 address->grph.addr.low_part = lower_32_bits(addr);
788 address->grph.addr.high_part = upper_32_bits(addr);
789 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
790 uint64_t luma_addr = afb->address + fb->offsets[0];
791 uint64_t chroma_addr = afb->address + fb->offsets[1];
792
793 plane_size->surface_size.x = 0;
794 plane_size->surface_size.y = 0;
795 plane_size->surface_size.width = fb->width;
796 plane_size->surface_size.height = fb->height;
797 plane_size->surface_pitch =
798 fb->pitches[0] / fb->format->cpp[0];
799
800 plane_size->chroma_size.x = 0;
801 plane_size->chroma_size.y = 0;
802 /* TODO: set these based on surface format */
803 plane_size->chroma_size.width = fb->width / 2;
804 plane_size->chroma_size.height = fb->height / 2;
805
806 plane_size->chroma_pitch =
807 fb->pitches[1] / fb->format->cpp[1];
808
809 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
810 address->video_progressive.luma_addr.low_part =
811 lower_32_bits(luma_addr);
812 address->video_progressive.luma_addr.high_part =
813 upper_32_bits(luma_addr);
814 address->video_progressive.chroma_addr.low_part =
815 lower_32_bits(chroma_addr);
816 address->video_progressive.chroma_addr.high_part =
817 upper_32_bits(chroma_addr);
818 }
819
820 if (adev->family >= AMDGPU_FAMILY_AI) {
821 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
822 rotation, plane_size,
823 tiling_info, dcc,
824 address,
825 force_disable_dcc);
826 if (ret)
827 return ret;
828 } else {
829 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
830 }
831
832 return 0;
833 }
834
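/*
 * Pin the framebuffer BO into a displayable domain, make sure it is bound
 * in GART, record its GPU address and, for newly created planes, fill the
 * DC plane buffer attributes from that address.
 */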
835 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
836 struct drm_plane_state *new_state)
837 {
838 struct amdgpu_framebuffer *afb;
839 struct drm_gem_object *obj;
840 struct amdgpu_device *adev;
841 struct amdgpu_bo *rbo;
842 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
843 uint32_t domain;
844 int r;
845
846 if (!new_state->fb) {
847 DRM_DEBUG_KMS("No FB bound\n");
848 return 0;
849 }
850
851 afb = to_amdgpu_framebuffer(new_state->fb);
852 obj = drm_gem_fb_get_obj(new_state->fb, 0);
853 if (!obj) {
854 DRM_ERROR("Failed to get obj from framebuffer\n");
855 return -EINVAL;
856 }
857
858 rbo = gem_to_amdgpu_bo(obj);
859 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
860 r = amdgpu_bo_reserve(rbo, true);
861 if (r) {
862 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
863 return r;
864 }
865
866 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
867 if (r) {
868 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
869 goto error_unlock;
870 }
871
872 if (plane->type != DRM_PLANE_TYPE_CURSOR)
873 domain = amdgpu_display_supported_domains(adev, rbo->flags);
874 else
875 domain = AMDGPU_GEM_DOMAIN_VRAM;
876
877 r = amdgpu_bo_pin(rbo, domain);
878 if (unlikely(r != 0)) {
879 if (r != -ERESTARTSYS)
880 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
881 goto error_unlock;
882 }
883
884 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
885 if (unlikely(r != 0)) {
886 DRM_ERROR("%p bind failed\n", rbo);
887 goto error_unpin;
888 }
889
890 r = drm_gem_plane_helper_prepare_fb(plane, new_state);
891 if (unlikely(r != 0))
892 goto error_unpin;
893
894 amdgpu_bo_unreserve(rbo);
895
896 afb->address = amdgpu_bo_gpu_offset(rbo);
897
898 amdgpu_bo_ref(rbo);
899
900 /**
901 * We don't do surface updates on planes that have been newly created,
902 * but we also don't have the afb->address during atomic check.
903 *
904 * Fill in buffer attributes depending on the address here, but only on
905 * newly created planes since they're not being used by DC yet and this
906 * won't modify global state.
907 */
908 dm_plane_state_old = to_dm_plane_state(plane->state);
909 dm_plane_state_new = to_dm_plane_state(new_state);
910
911 if (dm_plane_state_new->dc_state &&
912 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
913 struct dc_plane_state *plane_state =
914 dm_plane_state_new->dc_state;
915 bool force_disable_dcc = !plane_state->dcc.enable;
916
917 amdgpu_dm_plane_fill_plane_buffer_attributes(
918 adev, afb, plane_state->format, plane_state->rotation,
919 afb->tiling_flags,
920 &plane_state->tiling_info, &plane_state->plane_size,
921 &plane_state->dcc, &plane_state->address,
922 afb->tmz_surface, force_disable_dcc);
923 }
924
925 return 0;
926
927 error_unpin:
928 amdgpu_bo_unpin(rbo);
929
930 error_unlock:
931 amdgpu_bo_unreserve(rbo);
932 return r;
933 }
934
935 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
936 struct drm_plane_state *old_state)
937 {
938 struct amdgpu_bo *rbo;
939 int r;
940
941 if (!old_state->fb)
942 return;
943
944 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
945 r = amdgpu_bo_reserve(rbo, false);
946 if (unlikely(r)) {
947 DRM_ERROR("failed to reserve rbo before unpin\n");
948 return;
949 }
950
951 amdgpu_bo_unpin(rbo);
952 amdgpu_bo_unreserve(rbo);
953 amdgpu_bo_unref(&rbo);
954 }
955
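/*
 * Look up the per-format scaling limits from the DC plane caps. Values are
 * in DC units where 1000 == 1.0; a cap of 1 is normalized to 1000, meaning
 * no scaling is allowed.
 */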
956 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
957 struct drm_framebuffer *fb,
958 int *min_downscale, int *max_upscale)
959 {
960 struct amdgpu_device *adev = drm_to_adev(dev);
961 struct dc *dc = adev->dm.dc;
962 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
963 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
964
965 switch (fb->format->format) {
966 case DRM_FORMAT_P010:
967 case DRM_FORMAT_NV12:
968 case DRM_FORMAT_NV21:
969 *max_upscale = plane_cap->max_upscale_factor.nv12;
970 *min_downscale = plane_cap->max_downscale_factor.nv12;
971 break;
972
973 case DRM_FORMAT_XRGB16161616F:
974 case DRM_FORMAT_ARGB16161616F:
975 case DRM_FORMAT_XBGR16161616F:
976 case DRM_FORMAT_ABGR16161616F:
977 *max_upscale = plane_cap->max_upscale_factor.fp16;
978 *min_downscale = plane_cap->max_downscale_factor.fp16;
979 break;
980
981 default:
982 *max_upscale = plane_cap->max_upscale_factor.argb8888;
983 *min_downscale = plane_cap->max_downscale_factor.argb8888;
984 break;
985 }
986
987 /*
988 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
989 * scaling factor of 1.0 == 1000 units.
990 */
991 if (*max_upscale == 1)
992 *max_upscale = 1000;
993
994 if (*min_downscale == 1)
995 *min_downscale = 1000;
996 }
997
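/*
 * Validate a plane state against its CRTC: reject viewports that fall
 * completely off-screen or below the minimum size, then run the generic DRM
 * plane check with the per-format scaling limits converted to DRM's 16.16
 * fixed-point convention.
 */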
998 int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
999 struct drm_crtc_state *new_crtc_state)
1000 {
1001 struct drm_framebuffer *fb = state->fb;
1002 int min_downscale, max_upscale;
1003 int min_scale = 0;
1004 int max_scale = INT_MAX;
1005
1006 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
1007 if (fb && state->crtc) {
1008 /* Validate viewport to cover the case when only the position changes */
1009 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
1010 int viewport_width = state->crtc_w;
1011 int viewport_height = state->crtc_h;
1012
1013 if (state->crtc_x < 0)
1014 viewport_width += state->crtc_x;
1015 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
1016 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
1017
1018 if (state->crtc_y < 0)
1019 viewport_height += state->crtc_y;
1020 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
1021 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
1022
1023 if (viewport_width < 0 || viewport_height < 0) {
1024 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
1025 return -EINVAL;
1026 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
1027 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
1028 return -EINVAL;
1029 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
1030 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
1031 return -EINVAL;
1032 }
1033
1034 }
1035
1036 /* Get min/max allowed scaling factors from plane caps. */
1037 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
1038 &min_downscale, &max_upscale);
1039 /*
1040 * Convert to drm convention: 16.16 fixed point, instead of dc's
1041 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
1042 * dst/src, so min_scale = 1.0 / max_upscale, etc.
1043 */
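/*
 * Worked example (illustrative values): a DC max_upscale of 16000 (16x)
 * gives min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in DRM 16.16
 * fixed point.
 */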
1044 min_scale = (1000 << 16) / max_upscale;
1045 max_scale = (1000 << 16) / min_downscale;
1046 }
1047
1048 return drm_atomic_helper_check_plane_state(
1049 state, new_crtc_state, min_scale, max_scale, true, true);
1050 }
1051
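/*
 * Translate the DRM plane state's source and destination rectangles into
 * DC's scaling info, rejecting degenerate rectangles and scaling ratios
 * outside the per-format limits.
 */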
1052 int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
1053 const struct drm_plane_state *state,
1054 struct dc_scaling_info *scaling_info)
1055 {
1056 int scale_w, scale_h, min_downscale, max_upscale;
1057
1058 memset(scaling_info, 0, sizeof(*scaling_info));
1059
1060 /* Source is fixed point 16.16, but we ignore the fractional part for now... */
1061 scaling_info->src_rect.x = state->src_x >> 16;
1062 scaling_info->src_rect.y = state->src_y >> 16;
1063
1064 /*
1065 * For reasons we don't (yet) fully understand, a non-zero
1066 * src_y coordinate into an NV12 buffer can cause a
1067 * system hang on DCN1x.
1068 * To avoid hangs (and maybe be overly cautious)
1069 * let's reject both non-zero src_x and src_y.
1070 *
1071 * We currently know of only one use-case to reproduce a
1072 * scenario with non-zero src_x and src_y for NV12, which
1073 * is to gesture the YouTube Android app into full screen
1074 * on ChromeOS.
1075 */
1076 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
1077 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
1078 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
1079 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
1080 return -EINVAL;
1081
1082 scaling_info->src_rect.width = state->src_w >> 16;
1083 if (scaling_info->src_rect.width == 0)
1084 return -EINVAL;
1085
1086 scaling_info->src_rect.height = state->src_h >> 16;
1087 if (scaling_info->src_rect.height == 0)
1088 return -EINVAL;
1089
1090 scaling_info->dst_rect.x = state->crtc_x;
1091 scaling_info->dst_rect.y = state->crtc_y;
1092
1093 if (state->crtc_w == 0)
1094 return -EINVAL;
1095
1096 scaling_info->dst_rect.width = state->crtc_w;
1097
1098 if (state->crtc_h == 0)
1099 return -EINVAL;
1100
1101 scaling_info->dst_rect.height = state->crtc_h;
1102
1103 /* DRM doesn't specify clipping on destination output. */
1104 scaling_info->clip_rect = scaling_info->dst_rect;
1105
1106 /* Validate scaling per-format with DC plane caps */
1107 if (state->plane && state->plane->dev && state->fb) {
1108 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
1109 &min_downscale, &max_upscale);
1110 } else {
1111 min_downscale = 250;
1112 max_upscale = 16000;
1113 }
1114
1115 scale_w = scaling_info->dst_rect.width * 1000 /
1116 scaling_info->src_rect.width;
1117
1118 if (scale_w < min_downscale || scale_w > max_upscale)
1119 return -EINVAL;
1120
1121 scale_h = scaling_info->dst_rect.height * 1000 /
1122 scaling_info->src_rect.height;
1123
1124 if (scale_h < min_downscale || scale_h > max_upscale)
1125 return -EINVAL;
1126
1127 /*
1128 * The "scaling_quality" can be ignored for now; with quality = 0, DC
1129 * assumes reasonable defaults based on the format.
1130 */
1131
1132 return 0;
1133 }
1134
1135 static int dm_plane_atomic_check(struct drm_plane *plane,
1136 struct drm_atomic_state *state)
1137 {
1138 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1139 plane);
1140 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1141 struct dc *dc = adev->dm.dc;
1142 struct dm_plane_state *dm_plane_state;
1143 struct dc_scaling_info scaling_info;
1144 struct drm_crtc_state *new_crtc_state;
1145 int ret;
1146
1147 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
1148
1149 dm_plane_state = to_dm_plane_state(new_plane_state);
1150
1151 if (!dm_plane_state->dc_state)
1152 return 0;
1153
1154 new_crtc_state =
1155 drm_atomic_get_new_crtc_state(state,
1156 new_plane_state->crtc);
1157 if (!new_crtc_state)
1158 return -EINVAL;
1159
1160 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
1161 if (ret)
1162 return ret;
1163
1164 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
1165 if (ret)
1166 return ret;
1167
1168 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
1169 return 0;
1170
1171 return -EINVAL;
1172 }
1173
1174 static int dm_plane_atomic_async_check(struct drm_plane *plane,
1175 struct drm_atomic_state *state)
1176 {
1177 /* Only support async updates on cursor planes. */
1178 if (plane->type != DRM_PLANE_TYPE_CURSOR)
1179 return -EINVAL;
1180
1181 return 0;
1182 }
1183
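/*
 * Compute the DC cursor position from the cursor plane state, folding
 * negative on-screen coordinates into the hotspot so the cursor can move
 * partially off the top/left edge of the screen.
 */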
1184 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
1185 struct dc_cursor_position *position)
1186 {
1187 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1188 int x, y;
1189 int xorigin = 0, yorigin = 0;
1190
1191 if (!crtc || !plane->state->fb)
1192 return 0;
1193
1194 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
1195 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
1196 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
1197 __func__,
1198 plane->state->crtc_w,
1199 plane->state->crtc_h);
1200 return -EINVAL;
1201 }
1202
1203 x = plane->state->crtc_x;
1204 y = plane->state->crtc_y;
1205
1206 if (x <= -amdgpu_crtc->max_cursor_width ||
1207 y <= -amdgpu_crtc->max_cursor_height)
1208 return 0;
1209
1210 if (x < 0) {
1211 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1212 x = 0;
1213 }
1214 if (y < 0) {
1215 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1216 y = 0;
1217 }
1218 position->enable = true;
1219 position->translate_by_source = true;
1220 position->x = x;
1221 position->y = y;
1222 position->x_hotspot = xorigin;
1223 position->y_hotspot = yorigin;
1224
1225 return 0;
1226 }
1227
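/*
 * Program the DC cursor for the current plane state: either disable it when
 * the computed position is not enabled, or update the cursor attributes
 * (address, size, pitch) and position on the stream.
 */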
1228 void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
1229 struct drm_plane_state *old_plane_state)
1230 {
1231 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1232 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
1233 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
1234 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
1235 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1236 uint64_t address = afb ? afb->address : 0;
1237 struct dc_cursor_position position = {0};
1238 struct dc_cursor_attributes attributes;
1239 int ret;
1240
1241 if (!plane->state->fb && !old_plane_state->fb)
1242 return;
1243
1244 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
1245 __func__,
1246 amdgpu_crtc->crtc_id,
1247 plane->state->crtc_w,
1248 plane->state->crtc_h);
1249
1250 ret = get_cursor_position(plane, crtc, &position);
1251 if (ret)
1252 return;
1253
1254 if (!position.enable) {
1255 /* turn off cursor */
1256 if (crtc_state && crtc_state->stream) {
1257 mutex_lock(&adev->dm.dc_lock);
1258 dc_stream_set_cursor_position(crtc_state->stream,
1259 &position);
1260 mutex_unlock(&adev->dm.dc_lock);
1261 }
1262 return;
1263 }
1264
1265 amdgpu_crtc->cursor_width = plane->state->crtc_w;
1266 amdgpu_crtc->cursor_height = plane->state->crtc_h;
1267
1268 memset(&attributes, 0, sizeof(attributes));
1269 attributes.address.high_part = upper_32_bits(address);
1270 attributes.address.low_part = lower_32_bits(address);
1271 attributes.width = plane->state->crtc_w;
1272 attributes.height = plane->state->crtc_h;
1273 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
1274 attributes.rotation_angle = 0;
1275 attributes.attribute_flags.value = 0;
1276
1277 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
1278 * legacy gamma setup.
1279 */
1280 if (crtc_state->cm_is_degamma_srgb &&
1281 adev->dm.dc->caps.color.dpp.gamma_corr)
1282 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
1283
1284 if (afb)
1285 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
1286
1287 if (crtc_state->stream) {
1288 mutex_lock(&adev->dm.dc_lock);
1289 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
1290 &attributes))
1291 DRM_ERROR("DC failed to set cursor attributes\n");
1292
1293 if (!dc_stream_set_cursor_position(crtc_state->stream,
1294 &position))
1295 DRM_ERROR("DC failed to set cursor position\n");
1296 mutex_unlock(&adev->dm.dc_lock);
1297 }
1298 }
1299
1300 static void dm_plane_atomic_async_update(struct drm_plane *plane,
1301 struct drm_atomic_state *state)
1302 {
1303 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
1304 plane);
1305 struct drm_plane_state *old_state =
1306 drm_atomic_get_old_plane_state(state, plane);
1307
1308 trace_amdgpu_dm_atomic_update_cursor(new_state);
1309
1310 swap(plane->state->fb, new_state->fb);
1311
1312 plane->state->src_x = new_state->src_x;
1313 plane->state->src_y = new_state->src_y;
1314 plane->state->src_w = new_state->src_w;
1315 plane->state->src_h = new_state->src_h;
1316 plane->state->crtc_x = new_state->crtc_x;
1317 plane->state->crtc_y = new_state->crtc_y;
1318 plane->state->crtc_w = new_state->crtc_w;
1319 plane->state->crtc_h = new_state->crtc_h;
1320
1321 amdgpu_dm_plane_handle_cursor_update(plane, old_state);
1322 }
1323
1324 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
1325 .prepare_fb = dm_plane_helper_prepare_fb,
1326 .cleanup_fb = dm_plane_helper_cleanup_fb,
1327 .atomic_check = dm_plane_atomic_check,
1328 .atomic_async_check = dm_plane_atomic_async_check,
1329 .atomic_async_update = dm_plane_atomic_async_update
1330 };
1331
1332 static void dm_drm_plane_reset(struct drm_plane *plane)
1333 {
1334 struct dm_plane_state *amdgpu_state = NULL;
1335
1336 if (plane->state)
1337 plane->funcs->atomic_destroy_state(plane, plane->state);
1338
1339 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
1340 WARN_ON(amdgpu_state == NULL);
1341
1342 if (amdgpu_state)
1343 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
1344 }
1345
1346 static struct drm_plane_state *
1347 dm_drm_plane_duplicate_state(struct drm_plane *plane)
1348 {
1349 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
1350
1351 old_dm_plane_state = to_dm_plane_state(plane->state);
1352 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
1353 if (!dm_plane_state)
1354 return NULL;
1355
1356 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
1357
1358 if (old_dm_plane_state->dc_state) {
1359 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
1360 dc_plane_state_retain(dm_plane_state->dc_state);
1361 }
1362
1363 return &dm_plane_state->base;
1364 }
1365
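/*
 * Check whether a format/modifier pair is displayable: LINEAR and INVALID
 * are always accepted, the modifier must be on the plane's modifier list,
 * D-swizzle is restricted by bpp, and DCC is limited to single-plane 32bpp
 * formats.
 */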
1366 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
1367 uint32_t format,
1368 uint64_t modifier)
1369 {
1370 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1371 const struct drm_format_info *info = drm_format_info(format);
1372 int i;
1373
1374 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
1375
1376 if (!info)
1377 return false;
1378
1379 /*
1380 * We always have to allow these modifiers:
1381 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
1382 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
1383 */
1384 if (modifier == DRM_FORMAT_MOD_LINEAR ||
1385 modifier == DRM_FORMAT_MOD_INVALID) {
1386 return true;
1387 }
1388
1389 /* Check that the modifier is on the list of the plane's supported modifiers. */
1390 for (i = 0; i < plane->modifier_count; i++) {
1391 if (modifier == plane->modifiers[i])
1392 break;
1393 }
1394 if (i == plane->modifier_count)
1395 return false;
1396
1397 /*
1398 * For D swizzle the canonical modifier depends on the bpp, so check
1399 * it here.
1400 */
1401 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
1402 adev->family >= AMDGPU_FAMILY_NV) {
1403 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
1404 return false;
1405 }
1406
1407 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
1408 info->cpp[0] < 8)
1409 return false;
1410
1411 if (modifier_has_dcc(modifier)) {
1412 /* Per radeonsi comments 16/64 bpp are more complicated. */
1413 if (info->cpp[0] != 4)
1414 return false;
1415 /* We support multi-planar formats, but not when combined with
1416 * additional DCC metadata planes.
1417 */
1418 if (info->num_planes > 1)
1419 return false;
1420 }
1421
1422 return true;
1423 }
1424
1425 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
1426 struct drm_plane_state *state)
1427 {
1428 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1429
1430 if (dm_plane_state->dc_state)
1431 dc_plane_state_release(dm_plane_state->dc_state);
1432
1433 drm_atomic_helper_plane_destroy_state(plane, state);
1434 }
1435
1436 static const struct drm_plane_funcs dm_plane_funcs = {
1437 .update_plane = drm_atomic_helper_update_plane,
1438 .disable_plane = drm_atomic_helper_disable_plane,
1439 .destroy = drm_plane_helper_destroy,
1440 .reset = dm_drm_plane_reset,
1441 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
1442 .atomic_destroy_state = dm_drm_plane_destroy_state,
1443 .format_mod_supported = dm_plane_format_mod_supported,
1444 };
1445
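/*
 * Register a DRM plane with the formats, modifiers and properties (alpha,
 * blend mode, zpos, color encoding/range, rotation, damage clips) that the
 * DC plane caps allow for this plane type.
 */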
1446 int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
1447 struct drm_plane *plane,
1448 unsigned long possible_crtcs,
1449 const struct dc_plane_cap *plane_cap)
1450 {
1451 uint32_t formats[32];
1452 int num_formats;
1453 int res = -EPERM;
1454 unsigned int supported_rotations;
1455 uint64_t *modifiers = NULL;
1456
1457 num_formats = get_plane_formats(plane, plane_cap, formats,
1458 ARRAY_SIZE(formats));
1459
1460 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
1461 if (res)
1462 return res;
1463
1464 if (modifiers == NULL)
1465 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
1466
1467 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
1468 &dm_plane_funcs, formats, num_formats,
1469 modifiers, plane->type, NULL);
1470 kfree(modifiers);
1471 if (res)
1472 return res;
1473
1474 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
1475 plane_cap && plane_cap->per_pixel_alpha) {
1476 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
1477 BIT(DRM_MODE_BLEND_PREMULTI) |
1478 BIT(DRM_MODE_BLEND_COVERAGE);
1479
1480 drm_plane_create_alpha_property(plane);
1481 drm_plane_create_blend_mode_property(plane, blend_caps);
1482 }
1483
1484 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
1485 drm_plane_create_zpos_immutable_property(plane, 0);
1486 } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
1487 unsigned int zpos = 1 + drm_plane_index(plane);
1488 drm_plane_create_zpos_property(plane, zpos, 1, 254);
1489 } else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
1490 drm_plane_create_zpos_immutable_property(plane, 255);
1491 }
1492
1493 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
1494 plane_cap &&
1495 (plane_cap->pixel_format_support.nv12 ||
1496 plane_cap->pixel_format_support.p010)) {
1497 /* This only affects YUV formats. */
1498 drm_plane_create_color_properties(
1499 plane,
1500 BIT(DRM_COLOR_YCBCR_BT601) |
1501 BIT(DRM_COLOR_YCBCR_BT709) |
1502 BIT(DRM_COLOR_YCBCR_BT2020),
1503 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
1504 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
1505 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
1506 }
1507
1508 supported_rotations =
1509 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
1510 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
1511
1512 if (dm->adev->asic_type >= CHIP_BONAIRE &&
1513 plane->type != DRM_PLANE_TYPE_CURSOR)
1514 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
1515 supported_rotations);
1516
1517 if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) &&
1518 plane->type != DRM_PLANE_TYPE_CURSOR)
1519 drm_plane_enable_fb_damage_clips(plane);
1520
1521 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
1522
1523 /* Create (reset) the plane state */
1524 if (plane->funcs->reset)
1525 plane->funcs->reset(plane);
1526
1527 return 0;
1528 }
1529
1530 bool is_video_format(uint32_t format)
1531 {
1532 int i;
1533
1534 for (i = 0; i < ARRAY_SIZE(video_formats); i++)
1535 if (format == video_formats[i])
1536 return true;
1537
1538 return false;
1539 }
1540
1541