Loading...
Note: File does not exist in v5.4.
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright 2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: AMD
24 *
25 */
26
27#include <drm/drm_atomic_helper.h>
28#include <drm/drm_blend.h>
29#include <drm/drm_gem_atomic_helper.h>
30#include <drm/drm_plane_helper.h>
31#include <drm/drm_gem_framebuffer_helper.h>
32#include <drm/drm_fourcc.h>
33
34#include "amdgpu.h"
35#include "dal_asic_id.h"
36#include "amdgpu_display.h"
37#include "amdgpu_dm_trace.h"
38#include "amdgpu_dm_plane.h"
39#include "gc/gc_11_0_0_offset.h"
40#include "gc/gc_11_0_0_sh_mask.h"
41
42/*
43 * TODO: these are currently initialized to rgb formats only.
44 * For future use cases we should either initialize them dynamically based on
45 * plane capabilities, or initialize this array to all formats, so internal drm
46 * check will succeed, and let DC implement proper check
47 */
48static const uint32_t rgb_formats[] = {
49 DRM_FORMAT_XRGB8888,
50 DRM_FORMAT_ARGB8888,
51 DRM_FORMAT_RGBA8888,
52 DRM_FORMAT_XRGB2101010,
53 DRM_FORMAT_XBGR2101010,
54 DRM_FORMAT_ARGB2101010,
55 DRM_FORMAT_ABGR2101010,
56 DRM_FORMAT_XRGB16161616,
57 DRM_FORMAT_XBGR16161616,
58 DRM_FORMAT_ARGB16161616,
59 DRM_FORMAT_ABGR16161616,
60 DRM_FORMAT_XBGR8888,
61 DRM_FORMAT_ABGR8888,
62 DRM_FORMAT_RGB565,
63};
64
65static const uint32_t overlay_formats[] = {
66 DRM_FORMAT_XRGB8888,
67 DRM_FORMAT_ARGB8888,
68 DRM_FORMAT_RGBA8888,
69 DRM_FORMAT_XBGR8888,
70 DRM_FORMAT_ABGR8888,
71 DRM_FORMAT_RGB565,
72 DRM_FORMAT_NV21,
73 DRM_FORMAT_NV12,
74 DRM_FORMAT_P010
75};
76
77static const uint32_t video_formats[] = {
78 DRM_FORMAT_NV21,
79 DRM_FORMAT_NV12,
80 DRM_FORMAT_P010
81};
82
83static const u32 cursor_formats[] = {
84 DRM_FORMAT_ARGB8888
85};
86
87enum dm_micro_swizzle {
88 MICRO_SWIZZLE_Z = 0,
89 MICRO_SWIZZLE_S = 1,
90 MICRO_SWIZZLE_D = 2,
91 MICRO_SWIZZLE_R = 3
92};
93
94const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
95{
96 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
97}
98
99void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
100 bool *per_pixel_alpha, bool *pre_multiplied_alpha,
101 bool *global_alpha, int *global_alpha_value)
102{
103 *per_pixel_alpha = false;
104 *pre_multiplied_alpha = true;
105 *global_alpha = false;
106 *global_alpha_value = 0xff;
107
108
109 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
110 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
111 static const uint32_t alpha_formats[] = {
112 DRM_FORMAT_ARGB8888,
113 DRM_FORMAT_RGBA8888,
114 DRM_FORMAT_ABGR8888,
115 DRM_FORMAT_ARGB2101010,
116 DRM_FORMAT_ABGR2101010,
117 DRM_FORMAT_ARGB16161616,
118 DRM_FORMAT_ABGR16161616,
119 DRM_FORMAT_ARGB16161616F,
120 };
121 uint32_t format = plane_state->fb->format->format;
122 unsigned int i;
123
124 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
125 if (format == alpha_formats[i]) {
126 *per_pixel_alpha = true;
127 break;
128 }
129 }
130
131 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
132 *pre_multiplied_alpha = false;
133 }
134
135 if (plane_state->alpha < 0xffff) {
136 *global_alpha = true;
137 *global_alpha_value = plane_state->alpha >> 8;
138 }
139}
140
141static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
142{
143 if (!*mods)
144 return;
145
146 if (*cap - *size < 1) {
147 uint64_t new_cap = *cap * 2;
148 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
149
150 if (!new_mods) {
151 kfree(*mods);
152 *mods = NULL;
153 return;
154 }
155
156 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
157 kfree(*mods);
158 *mods = new_mods;
159 *cap = new_cap;
160 }
161
162 (*mods)[*size] = mod;
163 *size += 1;
164}
165
166static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier)
167{
168 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
169}
170
171static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier)
172{
173 if (modifier == DRM_FORMAT_MOD_LINEAR)
174 return 0;
175
176 return AMD_FMT_MOD_GET(TILE, modifier);
177}
178
179static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
180 uint64_t tiling_flags)
181{
182 /* Fill GFX8 params */
183 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
184 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
185
186 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
187 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
188 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
189 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
190 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
191
192 /* XXX fix me for VI */
193 tiling_info->gfx8.num_banks = num_banks;
194 tiling_info->gfx8.array_mode =
195 DC_ARRAY_2D_TILED_THIN1;
196 tiling_info->gfx8.tile_split = tile_split;
197 tiling_info->gfx8.bank_width = bankw;
198 tiling_info->gfx8.bank_height = bankh;
199 tiling_info->gfx8.tile_aspect = mtaspect;
200 tiling_info->gfx8.tile_mode =
201 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
202 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
203 == DC_ARRAY_1D_TILED_THIN1) {
204 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
205 }
206
207 tiling_info->gfx8.pipe_config =
208 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
209}
210
211static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
212 union dc_tiling_info *tiling_info)
213{
214 /* Fill GFX9 params */
215 tiling_info->gfx9.num_pipes =
216 adev->gfx.config.gb_addr_config_fields.num_pipes;
217 tiling_info->gfx9.num_banks =
218 adev->gfx.config.gb_addr_config_fields.num_banks;
219 tiling_info->gfx9.pipe_interleave =
220 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
221 tiling_info->gfx9.num_shader_engines =
222 adev->gfx.config.gb_addr_config_fields.num_se;
223 tiling_info->gfx9.max_compressed_frags =
224 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
225 tiling_info->gfx9.num_rb_per_se =
226 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
227 tiling_info->gfx9.shaderEnable = 1;
228 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
229 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
230}
231
232static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
233 union dc_tiling_info *tiling_info,
234 uint64_t modifier)
235{
236 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
237 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
238 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
239 unsigned int pipes_log2;
240
241 pipes_log2 = min(5u, mod_pipe_xor_bits);
242
243 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
244
245 if (!IS_AMD_FMT_MOD(modifier))
246 return;
247
248 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
249 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
250
251 if (adev->family >= AMDGPU_FAMILY_NV) {
252 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
253 } else {
254 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
255
256 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
257 }
258}
259
260static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
261 const enum surface_pixel_format format,
262 const enum dc_rotation_angle rotation,
263 const union dc_tiling_info *tiling_info,
264 const struct dc_plane_dcc_param *dcc,
265 const struct dc_plane_address *address,
266 const struct plane_size *plane_size)
267{
268 struct dc *dc = adev->dm.dc;
269 struct dc_dcc_surface_param input;
270 struct dc_surface_dcc_cap output;
271
272 memset(&input, 0, sizeof(input));
273 memset(&output, 0, sizeof(output));
274
275 if (!dcc->enable)
276 return 0;
277
278 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
279 !dc->cap_funcs.get_dcc_compression_cap)
280 return -EINVAL;
281
282 input.format = format;
283 input.surface_size.width = plane_size->surface_size.width;
284 input.surface_size.height = plane_size->surface_size.height;
285 input.swizzle_mode = tiling_info->gfx9.swizzle;
286
287 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
288 input.scan = SCAN_DIRECTION_HORIZONTAL;
289 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
290 input.scan = SCAN_DIRECTION_VERTICAL;
291
292 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
293 return -EINVAL;
294
295 if (!output.capable)
296 return -EINVAL;
297
298 if (dcc->independent_64b_blks == 0 &&
299 output.grph.rgb.independent_64b_blks != 0)
300 return -EINVAL;
301
302 return 0;
303}
304
305static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
306 const struct amdgpu_framebuffer *afb,
307 const enum surface_pixel_format format,
308 const enum dc_rotation_angle rotation,
309 const struct plane_size *plane_size,
310 union dc_tiling_info *tiling_info,
311 struct dc_plane_dcc_param *dcc,
312 struct dc_plane_address *address)
313{
314 const uint64_t modifier = afb->base.modifier;
315 int ret = 0;
316
317 amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
318 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
319
320 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
321 uint64_t dcc_address = afb->address + afb->base.offsets[1];
322 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
323 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
324
325 dcc->enable = 1;
326 dcc->meta_pitch = afb->base.pitches[1];
327 dcc->independent_64b_blks = independent_64b_blks;
328 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
329 if (independent_64b_blks && independent_128b_blks)
330 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
331 else if (independent_128b_blks)
332 dcc->dcc_ind_blk = hubp_ind_block_128b;
333 else if (independent_64b_blks && !independent_128b_blks)
334 dcc->dcc_ind_blk = hubp_ind_block_64b;
335 else
336 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
337 } else {
338 if (independent_64b_blks)
339 dcc->dcc_ind_blk = hubp_ind_block_64b;
340 else
341 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
342 }
343
344 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
345 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
346 }
347
348 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
349 if (ret)
350 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);
351
352 return ret;
353}
354
355static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amdgpu_device *adev,
356 const struct amdgpu_framebuffer *afb,
357 const enum surface_pixel_format format,
358 const enum dc_rotation_angle rotation,
359 const struct plane_size *plane_size,
360 union dc_tiling_info *tiling_info,
361 struct dc_plane_dcc_param *dcc,
362 struct dc_plane_address *address)
363{
364 const uint64_t modifier = afb->base.modifier;
365 int ret = 0;
366
367 /* TODO: Most of this function shouldn't be needed on GFX12. */
368 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
369
370 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
371
372 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
373 int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
374
375 dcc->enable = 1;
376 dcc->independent_64b_blks = max_compressed_block == 0;
377
378 if (max_compressed_block == 0)
379 dcc->dcc_ind_blk = hubp_ind_block_64b;
380 else if (max_compressed_block == 1)
381 dcc->dcc_ind_blk = hubp_ind_block_128b;
382 else
383 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
384 }
385
386 /* TODO: This seems wrong because there is no DCC plane on GFX12. */
387 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
388 if (ret)
389 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);
390
391 return ret;
392}
393
394static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev,
395 uint64_t **mods,
396 uint64_t *size,
397 uint64_t *capacity)
398{
399 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
400
401 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
402 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
403 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
404 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
405 AMD_FMT_MOD_SET(DCC, 1) |
406 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
407 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
408 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
409
410 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
411 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
412 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
413 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
414 AMD_FMT_MOD_SET(DCC, 1) |
415 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
416 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
417 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
418 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
419
420 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
421 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
422 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
423 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
424
425 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
426 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
427 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
428 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
429
430
431 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
432 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
433 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
434 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
435
436 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
437 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
438 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
439}
440
441static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev,
442 uint64_t **mods,
443 uint64_t *size,
444 uint64_t *capacity)
445{
446 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
447 int pipe_xor_bits = min(8, pipes +
448 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
449 int bank_xor_bits = min(8 - pipe_xor_bits,
450 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
451 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
452 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
453
454
455 if (adev->family == AMDGPU_FAMILY_RV) {
456 /* Raven2 and later */
457 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
458
459 /*
460 * No _D DCC swizzles yet because we only allow 32bpp, which
461 * doesn't support _D on DCN
462 */
463
464 if (has_constant_encode) {
465 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
466 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
467 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
468 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
469 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
470 AMD_FMT_MOD_SET(DCC, 1) |
471 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
472 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
473 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
474 }
475
476 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
477 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
478 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
479 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
480 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
481 AMD_FMT_MOD_SET(DCC, 1) |
482 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
483 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
484 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
485
486 if (has_constant_encode) {
487 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
488 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
489 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
490 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
491 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
492 AMD_FMT_MOD_SET(DCC, 1) |
493 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
494 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
495 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
496 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
497 AMD_FMT_MOD_SET(RB, rb) |
498 AMD_FMT_MOD_SET(PIPE, pipes));
499 }
500
501 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
502 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
503 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
504 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
505 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
506 AMD_FMT_MOD_SET(DCC, 1) |
507 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
508 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
509 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
510 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
511 AMD_FMT_MOD_SET(RB, rb) |
512 AMD_FMT_MOD_SET(PIPE, pipes));
513 }
514
515 /*
516 * Only supported for 64bpp on Raven, will be filtered on format in
517 * amdgpu_dm_plane_format_mod_supported.
518 */
519 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
520 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
521 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
522 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
523 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
524
525 if (adev->family == AMDGPU_FAMILY_RV) {
526 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
527 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
528 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
529 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
530 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
531 }
532
533 /*
534 * Only supported for 64bpp on Raven, will be filtered on format in
535 * amdgpu_dm_plane_format_mod_supported.
536 */
537 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
538 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
539 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
540
541 if (adev->family == AMDGPU_FAMILY_RV) {
542 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
543 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
544 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
545 }
546}
547
548static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev,
549 uint64_t **mods,
550 uint64_t *size,
551 uint64_t *capacity)
552{
553 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
554 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
555
556 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
557 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
558 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
559 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
560 AMD_FMT_MOD_SET(PACKERS, pkrs) |
561 AMD_FMT_MOD_SET(DCC, 1) |
562 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
563 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
564 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
565 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
566
567 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
568 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
569 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
570 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
571 AMD_FMT_MOD_SET(PACKERS, pkrs) |
572 AMD_FMT_MOD_SET(DCC, 1) |
573 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
574 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
575 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
576
577 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
578 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
579 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
580 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
581 AMD_FMT_MOD_SET(PACKERS, pkrs) |
582 AMD_FMT_MOD_SET(DCC, 1) |
583 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
584 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
585 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
586 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
587 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
588
589 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
590 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
591 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
592 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
593 AMD_FMT_MOD_SET(PACKERS, pkrs) |
594 AMD_FMT_MOD_SET(DCC, 1) |
595 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
596 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
597 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
598 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
599
600 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
601 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
602 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
603 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
604 AMD_FMT_MOD_SET(PACKERS, pkrs));
605
606 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
607 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
608 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
609 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
610 AMD_FMT_MOD_SET(PACKERS, pkrs));
611
612 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
613 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
614 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
615 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
616
617 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
618 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
619 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
620}
621
622static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev,
623 uint64_t **mods, uint64_t *size, uint64_t *capacity)
624{
625 int num_pipes = 0;
626 int pipe_xor_bits = 0;
627 int num_pkrs = 0;
628 int pkrs = 0;
629 u32 gb_addr_config;
630 u8 i = 0;
631 unsigned int swizzle_r_x;
632 uint64_t modifier_r_x;
633 uint64_t modifier_dcc_best;
634 uint64_t modifier_dcc_4k;
635
636 /* TODO: GFX11 IP HW init hasnt finish and we get zero if we read from
637 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}
638 */
639 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
640 ASSERT(gb_addr_config != 0);
641
642 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
643 pkrs = ilog2(num_pkrs);
644 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
645 pipe_xor_bits = ilog2(num_pipes);
646
647 for (i = 0; i < 2; i++) {
648 /* Insert the best one first. */
649 /* R_X swizzle modes are the best for rendering and DCC requires them. */
650 if (num_pipes > 16)
651 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
652 else
653 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
654
655 modifier_r_x = AMD_FMT_MOD |
656 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
657 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
658 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
659 AMD_FMT_MOD_SET(PACKERS, pkrs);
660
661 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
662 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
663 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
664 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
665 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
666
667 /* DCC settings for 4K and greater resolutions. (required by display hw) */
668 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
669 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
670 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
671 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
672
673 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best);
674 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k);
675
676 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
677 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
678
679 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x);
680 }
681
682 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
683 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
684 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
685}
686
687static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev,
688 uint64_t **mods, uint64_t *size, uint64_t *capacity)
689{
690 uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12);
691 uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D);
692 uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D);
693 uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
694 uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
695 uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1);
696 uint8_t max_comp_block[] = {1, 0};
697 uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0};
698 uint8_t i = 0, j = 0;
699 uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR};
700
701 for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
702 max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]);
703
704 /* With DCC: Best choice should be kept first. Hence, add all 256k modifiers of different
705 * max compressed blocks first and then move on to the next smaller sized layouts.
706 * Do not add the linear modifier here, and hence the condition of size-1 for the loop
707 */
708 for (j = 0; j < ARRAY_SIZE(gfx12_modifiers) - 1; j++)
709 for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
710 amdgpu_dm_plane_add_modifier(mods, size, capacity,
711 ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]);
712
713 /* Without DCC. Add all modifiers including linear at the end */
714 for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++)
715 amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]);
716
717}
718
719static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
720{
721 uint64_t size = 0, capacity = 128;
722 *mods = NULL;
723
724 /* We have not hooked up any pre-GFX9 modifiers. */
725 if (adev->family < AMDGPU_FAMILY_AI)
726 return 0;
727
728 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
729
730 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
731 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
732 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
733 return *mods ? 0 : -ENOMEM;
734 }
735
736 switch (adev->family) {
737 case AMDGPU_FAMILY_AI:
738 case AMDGPU_FAMILY_RV:
739 amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity);
740 break;
741 case AMDGPU_FAMILY_NV:
742 case AMDGPU_FAMILY_VGH:
743 case AMDGPU_FAMILY_YC:
744 case AMDGPU_FAMILY_GC_10_3_6:
745 case AMDGPU_FAMILY_GC_10_3_7:
746 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
747 amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity);
748 else
749 amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity);
750 break;
751 case AMDGPU_FAMILY_GC_11_0_0:
752 case AMDGPU_FAMILY_GC_11_0_1:
753 case AMDGPU_FAMILY_GC_11_5_0:
754 amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity);
755 break;
756 case AMDGPU_FAMILY_GC_12_0_0:
757 amdgpu_dm_plane_add_gfx12_modifiers(adev, mods, &size, &capacity);
758 break;
759 }
760
761 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
762
763 /* INVALID marks the end of the list. */
764 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
765
766 if (!*mods)
767 return -ENOMEM;
768
769 return 0;
770}
771
772static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane,
773 const struct dc_plane_cap *plane_cap,
774 uint32_t *formats, int max_formats)
775{
776 int i, num_formats = 0;
777
778 /*
779 * TODO: Query support for each group of formats directly from
780 * DC plane caps. This will require adding more formats to the
781 * caps list.
782 */
783
784 if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
785 (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
786 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
787 if (num_formats >= max_formats)
788 break;
789
790 formats[num_formats++] = rgb_formats[i];
791 }
792
793 if (plane_cap && plane_cap->pixel_format_support.nv12)
794 formats[num_formats++] = DRM_FORMAT_NV12;
795 if (plane_cap && plane_cap->pixel_format_support.p010)
796 formats[num_formats++] = DRM_FORMAT_P010;
797 if (plane_cap && plane_cap->pixel_format_support.fp16) {
798 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
799 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
800 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
801 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
802 }
803 } else {
804 switch (plane->type) {
805 case DRM_PLANE_TYPE_OVERLAY:
806 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
807 if (num_formats >= max_formats)
808 break;
809
810 formats[num_formats++] = overlay_formats[i];
811 }
812 break;
813
814 case DRM_PLANE_TYPE_CURSOR:
815 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
816 if (num_formats >= max_formats)
817 break;
818
819 formats[num_formats++] = cursor_formats[i];
820 }
821 break;
822
823 default:
824 break;
825 }
826 }
827
828 return num_formats;
829}
830
831int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
832 const struct amdgpu_framebuffer *afb,
833 const enum surface_pixel_format format,
834 const enum dc_rotation_angle rotation,
835 const uint64_t tiling_flags,
836 union dc_tiling_info *tiling_info,
837 struct plane_size *plane_size,
838 struct dc_plane_dcc_param *dcc,
839 struct dc_plane_address *address,
840 bool tmz_surface)
841{
842 const struct drm_framebuffer *fb = &afb->base;
843 int ret;
844
845 memset(tiling_info, 0, sizeof(*tiling_info));
846 memset(plane_size, 0, sizeof(*plane_size));
847 memset(dcc, 0, sizeof(*dcc));
848 memset(address, 0, sizeof(*address));
849
850 address->tmz_surface = tmz_surface;
851
852 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
853 uint64_t addr = afb->address + fb->offsets[0];
854
855 plane_size->surface_size.x = 0;
856 plane_size->surface_size.y = 0;
857 plane_size->surface_size.width = fb->width;
858 plane_size->surface_size.height = fb->height;
859 plane_size->surface_pitch =
860 fb->pitches[0] / fb->format->cpp[0];
861
862 address->type = PLN_ADDR_TYPE_GRAPHICS;
863 address->grph.addr.low_part = lower_32_bits(addr);
864 address->grph.addr.high_part = upper_32_bits(addr);
865 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
866 uint64_t luma_addr = afb->address + fb->offsets[0];
867 uint64_t chroma_addr = afb->address + fb->offsets[1];
868
869 plane_size->surface_size.x = 0;
870 plane_size->surface_size.y = 0;
871 plane_size->surface_size.width = fb->width;
872 plane_size->surface_size.height = fb->height;
873 plane_size->surface_pitch =
874 fb->pitches[0] / fb->format->cpp[0];
875
876 plane_size->chroma_size.x = 0;
877 plane_size->chroma_size.y = 0;
878 /* TODO: set these based on surface format */
879 plane_size->chroma_size.width = fb->width / 2;
880 plane_size->chroma_size.height = fb->height / 2;
881
882 plane_size->chroma_pitch =
883 fb->pitches[1] / fb->format->cpp[1];
884
885 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
886 address->video_progressive.luma_addr.low_part =
887 lower_32_bits(luma_addr);
888 address->video_progressive.luma_addr.high_part =
889 upper_32_bits(luma_addr);
890 address->video_progressive.chroma_addr.low_part =
891 lower_32_bits(chroma_addr);
892 address->video_progressive.chroma_addr.high_part =
893 upper_32_bits(chroma_addr);
894 }
895
896 if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) {
897 ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
898 rotation, plane_size,
899 tiling_info, dcc,
900 address);
901 if (ret)
902 return ret;
903 } else if (adev->family >= AMDGPU_FAMILY_AI) {
904 ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
905 rotation, plane_size,
906 tiling_info, dcc,
907 address);
908 if (ret)
909 return ret;
910 } else {
911 amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
912 }
913
914 return 0;
915}
916
917static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
918 struct drm_plane_state *new_state)
919{
920 struct amdgpu_framebuffer *afb;
921 struct drm_gem_object *obj;
922 struct amdgpu_device *adev;
923 struct amdgpu_bo *rbo;
924 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
925 uint32_t domain;
926 int r;
927
928 if (!new_state->fb) {
929 DRM_DEBUG_KMS("No FB bound\n");
930 return 0;
931 }
932
933 afb = to_amdgpu_framebuffer(new_state->fb);
934 obj = drm_gem_fb_get_obj(new_state->fb, 0);
935 if (!obj) {
936 DRM_ERROR("Failed to get obj from framebuffer\n");
937 return -EINVAL;
938 }
939
940 rbo = gem_to_amdgpu_bo(obj);
941 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
942 r = amdgpu_bo_reserve(rbo, true);
943 if (r) {
944 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
945 return r;
946 }
947
948 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
949 if (r) {
950 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
951 goto error_unlock;
952 }
953
954 if (plane->type != DRM_PLANE_TYPE_CURSOR)
955 domain = amdgpu_display_supported_domains(adev, rbo->flags);
956 else
957 domain = AMDGPU_GEM_DOMAIN_VRAM;
958
959 rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
960 r = amdgpu_bo_pin(rbo, domain);
961 if (unlikely(r != 0)) {
962 if (r != -ERESTARTSYS)
963 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
964 goto error_unlock;
965 }
966
967 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
968 if (unlikely(r != 0)) {
969 DRM_ERROR("%p bind failed\n", rbo);
970 goto error_unpin;
971 }
972
973 r = drm_gem_plane_helper_prepare_fb(plane, new_state);
974 if (unlikely(r != 0))
975 goto error_unpin;
976
977 amdgpu_bo_unreserve(rbo);
978
979 afb->address = amdgpu_bo_gpu_offset(rbo);
980
981 amdgpu_bo_ref(rbo);
982
983 /**
984 * We don't do surface updates on planes that have been newly created,
985 * but we also don't have the afb->address during atomic check.
986 *
987 * Fill in buffer attributes depending on the address here, but only on
988 * newly created planes since they're not being used by DC yet and this
989 * won't modify global state.
990 */
991 dm_plane_state_old = to_dm_plane_state(plane->state);
992 dm_plane_state_new = to_dm_plane_state(new_state);
993
994 if (dm_plane_state_new->dc_state &&
995 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
996 struct dc_plane_state *plane_state =
997 dm_plane_state_new->dc_state;
998
999 amdgpu_dm_plane_fill_plane_buffer_attributes(
1000 adev, afb, plane_state->format, plane_state->rotation,
1001 afb->tiling_flags,
1002 &plane_state->tiling_info, &plane_state->plane_size,
1003 &plane_state->dcc, &plane_state->address,
1004 afb->tmz_surface);
1005 }
1006
1007 return 0;
1008
1009error_unpin:
1010 amdgpu_bo_unpin(rbo);
1011
1012error_unlock:
1013 amdgpu_bo_unreserve(rbo);
1014 return r;
1015}
1016
1017static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane,
1018 struct drm_plane_state *old_state)
1019{
1020 struct amdgpu_bo *rbo;
1021 int r;
1022
1023 if (!old_state->fb)
1024 return;
1025
1026 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
1027 r = amdgpu_bo_reserve(rbo, false);
1028 if (unlikely(r)) {
1029 DRM_ERROR("failed to reserve rbo before unpin\n");
1030 return;
1031 }
1032
1033 amdgpu_bo_unpin(rbo);
1034 amdgpu_bo_unreserve(rbo);
1035 amdgpu_bo_unref(&rbo);
1036}
1037
1038static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev,
1039 struct drm_framebuffer *fb,
1040 int *min_downscale, int *max_upscale)
1041{
1042 struct amdgpu_device *adev = drm_to_adev(dev);
1043 struct dc *dc = adev->dm.dc;
1044 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
1045 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
1046
1047 switch (fb->format->format) {
1048 case DRM_FORMAT_P010:
1049 case DRM_FORMAT_NV12:
1050 case DRM_FORMAT_NV21:
1051 *max_upscale = plane_cap->max_upscale_factor.nv12;
1052 *min_downscale = plane_cap->max_downscale_factor.nv12;
1053 break;
1054
1055 case DRM_FORMAT_XRGB16161616F:
1056 case DRM_FORMAT_ARGB16161616F:
1057 case DRM_FORMAT_XBGR16161616F:
1058 case DRM_FORMAT_ABGR16161616F:
1059 *max_upscale = plane_cap->max_upscale_factor.fp16;
1060 *min_downscale = plane_cap->max_downscale_factor.fp16;
1061 break;
1062
1063 default:
1064 *max_upscale = plane_cap->max_upscale_factor.argb8888;
1065 *min_downscale = plane_cap->max_downscale_factor.argb8888;
1066 break;
1067 }
1068
1069 /*
1070 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
1071 * scaling factor of 1.0 == 1000 units.
1072 */
1073 if (*max_upscale == 1)
1074 *max_upscale = 1000;
1075
1076 if (*min_downscale == 1)
1077 *min_downscale = 1000;
1078}
1079
1080int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
1081 struct drm_crtc_state *new_crtc_state)
1082{
1083 struct drm_framebuffer *fb = state->fb;
1084 int min_downscale, max_upscale;
1085 int min_scale = 0;
1086 int max_scale = INT_MAX;
1087
1088 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
1089 if (fb && state->crtc) {
1090 /* Validate viewport to cover the case when only the position changes */
1091 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
1092 int viewport_width = state->crtc_w;
1093 int viewport_height = state->crtc_h;
1094
1095 if (state->crtc_x < 0)
1096 viewport_width += state->crtc_x;
1097 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
1098 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
1099
1100 if (state->crtc_y < 0)
1101 viewport_height += state->crtc_y;
1102 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
1103 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
1104
1105 if (viewport_width < 0 || viewport_height < 0) {
1106 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
1107 return -EINVAL;
1108 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
1109 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
1110 return -EINVAL;
1111 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
1112 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
1113 return -EINVAL;
1114 }
1115
1116 }
1117
1118 /* Get min/max allowed scaling factors from plane caps. */
1119 amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb,
1120 &min_downscale, &max_upscale);
1121 /*
1122 * Convert to drm convention: 16.16 fixed point, instead of dc's
1123 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
1124 * dst/src, so min_scale = 1.0 / max_upscale, etc.
1125 */
1126 min_scale = (1000 << 16) / max_upscale;
1127 max_scale = (1000 << 16) / min_downscale;
1128 }
1129
1130 return drm_atomic_helper_check_plane_state(
1131 state, new_crtc_state, min_scale, max_scale, true, true);
1132}
1133
1134int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
1135 const struct drm_plane_state *state,
1136 struct dc_scaling_info *scaling_info)
1137{
1138 int scale_w, scale_h, min_downscale, max_upscale;
1139
1140 memset(scaling_info, 0, sizeof(*scaling_info));
1141
1142 /* Source is fixed 16.16 but we ignore mantissa for now... */
1143 scaling_info->src_rect.x = state->src_x >> 16;
1144 scaling_info->src_rect.y = state->src_y >> 16;
1145
1146 /*
1147 * For reasons we don't (yet) fully understand a non-zero
1148 * src_y coordinate into an NV12 buffer can cause a
1149 * system hang on DCN1x.
1150 * To avoid hangs (and maybe be overly cautious)
1151 * let's reject both non-zero src_x and src_y.
1152 *
1153 * We currently know of only one use-case to reproduce a
1154 * scenario with non-zero src_x and src_y for NV12, which
1155 * is to gesture the YouTube Android app into full screen
1156 * on ChromeOS.
1157 */
1158 if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
1159 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) &&
1160 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
1161 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
1162 return -EINVAL;
1163
1164 scaling_info->src_rect.width = state->src_w >> 16;
1165 if (scaling_info->src_rect.width == 0)
1166 return -EINVAL;
1167
1168 scaling_info->src_rect.height = state->src_h >> 16;
1169 if (scaling_info->src_rect.height == 0)
1170 return -EINVAL;
1171
1172 scaling_info->dst_rect.x = state->crtc_x;
1173 scaling_info->dst_rect.y = state->crtc_y;
1174
1175 if (state->crtc_w == 0)
1176 return -EINVAL;
1177
1178 scaling_info->dst_rect.width = state->crtc_w;
1179
1180 if (state->crtc_h == 0)
1181 return -EINVAL;
1182
1183 scaling_info->dst_rect.height = state->crtc_h;
1184
1185 /* DRM doesn't specify clipping on destination output. */
1186 scaling_info->clip_rect = scaling_info->dst_rect;
1187
1188 /* Validate scaling per-format with DC plane caps */
1189 if (state->plane && state->plane->dev && state->fb) {
1190 amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
1191 &min_downscale, &max_upscale);
1192 } else {
1193 min_downscale = 250;
1194 max_upscale = 16000;
1195 }
1196
1197 scale_w = scaling_info->dst_rect.width * 1000 /
1198 scaling_info->src_rect.width;
1199
1200 if (scale_w < min_downscale || scale_w > max_upscale)
1201 return -EINVAL;
1202
1203 scale_h = scaling_info->dst_rect.height * 1000 /
1204 scaling_info->src_rect.height;
1205
1206 if (scale_h < min_downscale || scale_h > max_upscale)
1207 return -EINVAL;
1208
1209 /*
1210 * The "scaling_quality" can be ignored for now, quality = 0 has DC
1211 * assume reasonable defaults based on the format.
1212 */
1213
1214 return 0;
1215}
1216
1217static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
1218 struct drm_atomic_state *state)
1219{
1220 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1221 plane);
1222 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1223 struct dc *dc = adev->dm.dc;
1224 struct dm_plane_state *dm_plane_state;
1225 struct dc_scaling_info scaling_info;
1226 struct drm_crtc_state *new_crtc_state;
1227 int ret;
1228
1229 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
1230
1231 dm_plane_state = to_dm_plane_state(new_plane_state);
1232
1233 if (!dm_plane_state->dc_state)
1234 return 0;
1235
1236 new_crtc_state =
1237 drm_atomic_get_new_crtc_state(state,
1238 new_plane_state->crtc);
1239 if (!new_crtc_state)
1240 return -EINVAL;
1241
1242 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
1243 if (ret)
1244 return ret;
1245
1246 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
1247 if (ret)
1248 return ret;
1249
1250 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
1251 return 0;
1252
1253 return -EINVAL;
1254}
1255
1256static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
1257 struct drm_atomic_state *state)
1258{
1259 struct drm_crtc_state *new_crtc_state;
1260 struct drm_plane_state *new_plane_state;
1261 struct dm_crtc_state *dm_new_crtc_state;
1262
1263 /* Only support async updates on cursor planes. */
1264 if (plane->type != DRM_PLANE_TYPE_CURSOR)
1265 return -EINVAL;
1266
1267 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
1268 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
1269 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1270 /* Reject overlay cursors for now*/
1271 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
1272 return -EINVAL;
1273
1274 return 0;
1275}
1276
1277int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
1278 struct dc_cursor_position *position)
1279{
1280 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1281 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1282 int x, y;
1283 int xorigin = 0, yorigin = 0;
1284
1285 if (!crtc || !plane->state->fb)
1286 return 0;
1287
1288 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
1289 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
1290 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
1291 __func__,
1292 plane->state->crtc_w,
1293 plane->state->crtc_h);
1294 return -EINVAL;
1295 }
1296
1297 x = plane->state->crtc_x;
1298 y = plane->state->crtc_y;
1299
1300 if (x <= -amdgpu_crtc->max_cursor_width ||
1301 y <= -amdgpu_crtc->max_cursor_height)
1302 return 0;
1303
1304 if (x < 0) {
1305 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1306 x = 0;
1307 }
1308 if (y < 0) {
1309 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1310 y = 0;
1311 }
1312 position->enable = true;
1313 position->x = x;
1314 position->y = y;
1315 position->x_hotspot = xorigin;
1316 position->y_hotspot = yorigin;
1317
1318 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1))
1319 position->translate_by_source = true;
1320
1321 return 0;
1322}
1323
1324void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
1325 struct drm_plane_state *old_plane_state)
1326{
1327 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1328 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
1329 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
1330 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
1331 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1332 uint64_t address = afb ? afb->address : 0;
1333 struct dc_cursor_position position = {0};
1334 struct dc_cursor_attributes attributes;
1335 int ret;
1336
1337 if (!plane->state->fb && !old_plane_state->fb)
1338 return;
1339
1340 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n",
1341 amdgpu_crtc->crtc_id, plane->state->crtc_w,
1342 plane->state->crtc_h);
1343
1344 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position);
1345 if (ret)
1346 return;
1347
1348 if (!position.enable) {
1349 /* turn off cursor */
1350 if (crtc_state && crtc_state->stream) {
1351 mutex_lock(&adev->dm.dc_lock);
1352 dc_stream_program_cursor_position(crtc_state->stream,
1353 &position);
1354 mutex_unlock(&adev->dm.dc_lock);
1355 }
1356 return;
1357 }
1358
1359 amdgpu_crtc->cursor_width = plane->state->crtc_w;
1360 amdgpu_crtc->cursor_height = plane->state->crtc_h;
1361
1362 memset(&attributes, 0, sizeof(attributes));
1363 attributes.address.high_part = upper_32_bits(address);
1364 attributes.address.low_part = lower_32_bits(address);
1365 attributes.width = plane->state->crtc_w;
1366 attributes.height = plane->state->crtc_h;
1367 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
1368 attributes.rotation_angle = 0;
1369 attributes.attribute_flags.value = 0;
1370
1371 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
1372 * legacy gamma setup.
1373 */
1374 if (crtc_state->cm_is_degamma_srgb &&
1375 adev->dm.dc->caps.color.dpp.gamma_corr)
1376 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
1377
1378 if (afb)
1379 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
1380
1381 if (crtc_state->stream) {
1382 mutex_lock(&adev->dm.dc_lock);
1383 if (!dc_stream_program_cursor_attributes(crtc_state->stream,
1384 &attributes))
1385 DRM_ERROR("DC failed to set cursor attributes\n");
1386
1387 if (!dc_stream_program_cursor_position(crtc_state->stream,
1388 &position))
1389 DRM_ERROR("DC failed to set cursor position\n");
1390 mutex_unlock(&adev->dm.dc_lock);
1391 }
1392}
1393
1394static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
1395 struct drm_atomic_state *state)
1396{
1397 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
1398 plane);
1399 struct drm_plane_state *old_state =
1400 drm_atomic_get_old_plane_state(state, plane);
1401
1402 trace_amdgpu_dm_atomic_update_cursor(new_state);
1403
1404 swap(plane->state->fb, new_state->fb);
1405
1406 plane->state->src_x = new_state->src_x;
1407 plane->state->src_y = new_state->src_y;
1408 plane->state->src_w = new_state->src_w;
1409 plane->state->src_h = new_state->src_h;
1410 plane->state->crtc_x = new_state->crtc_x;
1411 plane->state->crtc_y = new_state->crtc_y;
1412 plane->state->crtc_w = new_state->crtc_w;
1413 plane->state->crtc_h = new_state->crtc_h;
1414
1415 amdgpu_dm_plane_handle_cursor_update(plane, old_state);
1416}
1417
1418static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
1419 .prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
1420 .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
1421 .atomic_check = amdgpu_dm_plane_atomic_check,
1422 .atomic_async_check = amdgpu_dm_plane_atomic_async_check,
1423 .atomic_async_update = amdgpu_dm_plane_atomic_async_update
1424};
1425
1426static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
1427{
1428 struct dm_plane_state *amdgpu_state = NULL;
1429
1430 if (plane->state)
1431 plane->funcs->atomic_destroy_state(plane, plane->state);
1432
1433 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
1434 WARN_ON(amdgpu_state == NULL);
1435
1436 if (!amdgpu_state)
1437 return;
1438
1439 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
1440 amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
1441 amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT;
1442 amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
1443 amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
1444}
1445
1446static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane)
1447{
1448 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
1449
1450 old_dm_plane_state = to_dm_plane_state(plane->state);
1451 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
1452 if (!dm_plane_state)
1453 return NULL;
1454
1455 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
1456
1457 if (old_dm_plane_state->dc_state) {
1458 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
1459 dc_plane_state_retain(dm_plane_state->dc_state);
1460 }
1461
1462 if (old_dm_plane_state->degamma_lut)
1463 dm_plane_state->degamma_lut =
1464 drm_property_blob_get(old_dm_plane_state->degamma_lut);
1465 if (old_dm_plane_state->ctm)
1466 dm_plane_state->ctm =
1467 drm_property_blob_get(old_dm_plane_state->ctm);
1468 if (old_dm_plane_state->shaper_lut)
1469 dm_plane_state->shaper_lut =
1470 drm_property_blob_get(old_dm_plane_state->shaper_lut);
1471 if (old_dm_plane_state->lut3d)
1472 dm_plane_state->lut3d =
1473 drm_property_blob_get(old_dm_plane_state->lut3d);
1474 if (old_dm_plane_state->blend_lut)
1475 dm_plane_state->blend_lut =
1476 drm_property_blob_get(old_dm_plane_state->blend_lut);
1477
1478 dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf;
1479 dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult;
1480 dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf;
1481 dm_plane_state->blend_tf = old_dm_plane_state->blend_tf;
1482
1483 return &dm_plane_state->base;
1484}
1485
1486static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
1487 uint32_t format,
1488 uint64_t modifier)
1489{
1490 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1491 const struct drm_format_info *info = drm_format_info(format);
1492 int i;
1493
1494 if (!info)
1495 return false;
1496
1497 /*
1498 * We always have to allow these modifiers:
1499 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
1500 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
1501 */
1502 if (modifier == DRM_FORMAT_MOD_LINEAR ||
1503 modifier == DRM_FORMAT_MOD_INVALID) {
1504 return true;
1505 }
1506
1507 /* Check that the modifier is on the list of the plane's supported modifiers. */
1508 for (i = 0; i < plane->modifier_count; i++) {
1509 if (modifier == plane->modifiers[i])
1510 break;
1511 }
1512 if (i == plane->modifier_count)
1513 return false;
1514
1515 /* GFX12 doesn't have these limitations. */
1516 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) {
1517 enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;
1518
1519 /*
1520 * For D swizzle the canonical modifier depends on the bpp, so check
1521 * it here.
1522 */
1523 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
1524 adev->family >= AMDGPU_FAMILY_NV) {
1525 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
1526 return false;
1527 }
1528
1529 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
1530 info->cpp[0] < 8)
1531 return false;
1532
1533 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
1534 /* Per radeonsi comments 16/64 bpp are more complicated. */
1535 if (info->cpp[0] != 4)
1536 return false;
1537 /* We support multi-planar formats, but not when combined with
1538 * additional DCC metadata planes.
1539 */
1540 if (info->num_planes > 1)
1541 return false;
1542 }
1543 }
1544
1545 return true;
1546}
1547
1548static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane,
1549 struct drm_plane_state *state)
1550{
1551 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1552
1553 if (dm_plane_state->degamma_lut)
1554 drm_property_blob_put(dm_plane_state->degamma_lut);
1555 if (dm_plane_state->ctm)
1556 drm_property_blob_put(dm_plane_state->ctm);
1557 if (dm_plane_state->lut3d)
1558 drm_property_blob_put(dm_plane_state->lut3d);
1559 if (dm_plane_state->shaper_lut)
1560 drm_property_blob_put(dm_plane_state->shaper_lut);
1561 if (dm_plane_state->blend_lut)
1562 drm_property_blob_put(dm_plane_state->blend_lut);
1563
1564 if (dm_plane_state->dc_state)
1565 dc_plane_state_release(dm_plane_state->dc_state);
1566
1567 drm_atomic_helper_plane_destroy_state(plane, state);
1568}
1569
1570#ifdef AMD_PRIVATE_COLOR
1571static void
1572dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
1573 struct drm_plane *plane)
1574{
1575 struct amdgpu_mode_info mode_info = dm->adev->mode_info;
1576 struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp;
1577
1578 /* Check HW color pipeline capabilities on DPP block (pre-blending)
1579 * before exposing related properties.
1580 */
1581 if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) {
1582 drm_object_attach_property(&plane->base,
1583 mode_info.plane_degamma_lut_property,
1584 0);
1585 drm_object_attach_property(&plane->base,
1586 mode_info.plane_degamma_lut_size_property,
1587 MAX_COLOR_LUT_ENTRIES);
1588 drm_object_attach_property(&plane->base,
1589 dm->adev->mode_info.plane_degamma_tf_property,
1590 AMDGPU_TRANSFER_FUNCTION_DEFAULT);
1591 }
1592 /* HDR MULT is always available */
1593 drm_object_attach_property(&plane->base,
1594 dm->adev->mode_info.plane_hdr_mult_property,
1595 AMDGPU_HDR_MULT_DEFAULT);
1596
1597 /* Only enable plane CTM if both DPP and MPC gamut remap are available. */
1598 if (dm->dc->caps.color.mpc.gamut_remap)
1599 drm_object_attach_property(&plane->base,
1600 dm->adev->mode_info.plane_ctm_property, 0);
1601
1602 if (dpp_color_caps.hw_3d_lut) {
1603 drm_object_attach_property(&plane->base,
1604 mode_info.plane_shaper_lut_property, 0);
1605 drm_object_attach_property(&plane->base,
1606 mode_info.plane_shaper_lut_size_property,
1607 MAX_COLOR_LUT_ENTRIES);
1608 drm_object_attach_property(&plane->base,
1609 mode_info.plane_shaper_tf_property,
1610 AMDGPU_TRANSFER_FUNCTION_DEFAULT);
1611 drm_object_attach_property(&plane->base,
1612 mode_info.plane_lut3d_property, 0);
1613 drm_object_attach_property(&plane->base,
1614 mode_info.plane_lut3d_size_property,
1615 MAX_COLOR_3DLUT_SIZE);
1616 }
1617
1618 if (dpp_color_caps.ogam_ram) {
1619 drm_object_attach_property(&plane->base,
1620 mode_info.plane_blend_lut_property, 0);
1621 drm_object_attach_property(&plane->base,
1622 mode_info.plane_blend_lut_size_property,
1623 MAX_COLOR_LUT_ENTRIES);
1624 drm_object_attach_property(&plane->base,
1625 mode_info.plane_blend_tf_property,
1626 AMDGPU_TRANSFER_FUNCTION_DEFAULT);
1627 }
1628}
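/*
 * Illustrative sketch of the userspace side, kept as a comment so it does not
 * affect the build: setting one of the blob properties attached above through
 * the atomic API. The property name "AMD_PLANE_DEGAMMA_LUT" is an assumption
 * about how this driver-private property is exposed; verify it against the
 * plane's property list on the running kernel. The client must be a DRM
 * master with DRM_CLIENT_CAP_ATOMIC enabled.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <xf86drm.h>
 *	#include <xf86drmMode.h>
 *	#include <drm/drm_mode.h>	// struct drm_color_lut
 *
 *	// Hypothetical helper: fd is an atomic-capable DRM master fd.
 *	static int set_plane_degamma(int fd, uint32_t plane_id,
 *				     const struct drm_color_lut *lut,
 *				     uint32_t entries)
 *	{
 *		drmModeObjectProperties *props =
 *			drmModeObjectGetProperties(fd, plane_id,
 *						   DRM_MODE_OBJECT_PLANE);
 *		uint32_t prop_id = 0, blob_id = 0, i;
 *		int ret = -1;
 *
 *		for (i = 0; props && i < props->count_props; i++) {
 *			drmModePropertyRes *p =
 *				drmModeGetProperty(fd, props->props[i]);
 *			if (p && !strcmp(p->name, "AMD_PLANE_DEGAMMA_LUT"))
 *				prop_id = p->prop_id;
 *			drmModeFreeProperty(p);
 *		}
 *
 *		if (prop_id && !drmModeCreatePropertyBlob(fd, lut,
 *							  entries * sizeof(*lut),
 *							  &blob_id)) {
 *			drmModeAtomicReq *req = drmModeAtomicAlloc();
 *
 *			drmModeAtomicAddProperty(req, plane_id, prop_id, blob_id);
 *			ret = drmModeAtomicCommit(fd, req, 0, NULL);
 *			drmModeAtomicFree(req);
 *		}
 *		drmModeFreeObjectProperties(props);
 *		return ret;
 *	}
 */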
1629
1630static int
1631dm_atomic_plane_set_property(struct drm_plane *plane,
1632 struct drm_plane_state *state,
1633 struct drm_property *property,
1634 uint64_t val)
1635{
1636 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1637 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1638 bool replaced = false;
1639 int ret;
1640
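	/*
	 * Note on the drm_property_replace_blob_from_id() calls below: the two
	 * size arguments are (expected total size, expected element size), and
	 * -1 skips that particular check. The LUT blobs therefore only require
	 * a whole number of struct drm_color_lut entries, while the CTM blob
	 * must be exactly sizeof(struct drm_color_ctm_3x4).
	 */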
1641 if (property == adev->mode_info.plane_degamma_lut_property) {
1642 ret = drm_property_replace_blob_from_id(plane->dev,
1643 &dm_plane_state->degamma_lut,
1644 val, -1,
1645 sizeof(struct drm_color_lut),
1646 &replaced);
1647 dm_plane_state->base.color_mgmt_changed |= replaced;
1648 return ret;
1649 } else if (property == adev->mode_info.plane_degamma_tf_property) {
1650 if (dm_plane_state->degamma_tf != val) {
1651 dm_plane_state->degamma_tf = val;
1652 dm_plane_state->base.color_mgmt_changed = 1;
1653 }
1654 } else if (property == adev->mode_info.plane_hdr_mult_property) {
1655 if (dm_plane_state->hdr_mult != val) {
1656 dm_plane_state->hdr_mult = val;
1657 dm_plane_state->base.color_mgmt_changed = 1;
1658 }
1659 } else if (property == adev->mode_info.plane_ctm_property) {
1660 ret = drm_property_replace_blob_from_id(plane->dev,
1661 &dm_plane_state->ctm,
1662 val,
1663 sizeof(struct drm_color_ctm_3x4), -1,
1664 &replaced);
1665 dm_plane_state->base.color_mgmt_changed |= replaced;
1666 return ret;
1667 } else if (property == adev->mode_info.plane_shaper_lut_property) {
1668 ret = drm_property_replace_blob_from_id(plane->dev,
1669 &dm_plane_state->shaper_lut,
1670 val, -1,
1671 sizeof(struct drm_color_lut),
1672 &replaced);
1673 dm_plane_state->base.color_mgmt_changed |= replaced;
1674 return ret;
1675 } else if (property == adev->mode_info.plane_shaper_tf_property) {
1676 if (dm_plane_state->shaper_tf != val) {
1677 dm_plane_state->shaper_tf = val;
1678 dm_plane_state->base.color_mgmt_changed = 1;
1679 }
1680 } else if (property == adev->mode_info.plane_lut3d_property) {
1681 ret = drm_property_replace_blob_from_id(plane->dev,
1682 &dm_plane_state->lut3d,
1683 val, -1,
1684 sizeof(struct drm_color_lut),
1685 &replaced);
1686 dm_plane_state->base.color_mgmt_changed |= replaced;
1687 return ret;
1688 } else if (property == adev->mode_info.plane_blend_lut_property) {
1689 ret = drm_property_replace_blob_from_id(plane->dev,
1690 &dm_plane_state->blend_lut,
1691 val, -1,
1692 sizeof(struct drm_color_lut),
1693 &replaced);
1694 dm_plane_state->base.color_mgmt_changed |= replaced;
1695 return ret;
1696 } else if (property == adev->mode_info.plane_blend_tf_property) {
1697 if (dm_plane_state->blend_tf != val) {
1698 dm_plane_state->blend_tf = val;
1699 dm_plane_state->base.color_mgmt_changed = 1;
1700 }
1701 } else {
1702 drm_dbg_atomic(plane->dev,
1703 "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
1704 plane->base.id, plane->name,
1705 property->base.id, property->name);
1706 return -EINVAL;
1707 }
1708
1709 return 0;
1710}
1711
1712static int
1713dm_atomic_plane_get_property(struct drm_plane *plane,
1714 const struct drm_plane_state *state,
1715 struct drm_property *property,
1716 uint64_t *val)
1717{
1718 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1719 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1720
1721 if (property == adev->mode_info.plane_degamma_lut_property) {
1722 *val = (dm_plane_state->degamma_lut) ?
1723 dm_plane_state->degamma_lut->base.id : 0;
1724 } else if (property == adev->mode_info.plane_degamma_tf_property) {
1725 *val = dm_plane_state->degamma_tf;
1726 } else if (property == adev->mode_info.plane_hdr_mult_property) {
1727 *val = dm_plane_state->hdr_mult;
1728 } else if (property == adev->mode_info.plane_ctm_property) {
1729 *val = (dm_plane_state->ctm) ?
1730 dm_plane_state->ctm->base.id : 0;
1731 } else if (property == adev->mode_info.plane_shaper_lut_property) {
1732 *val = (dm_plane_state->shaper_lut) ?
1733 dm_plane_state->shaper_lut->base.id : 0;
1734 } else if (property == adev->mode_info.plane_shaper_tf_property) {
1735 *val = dm_plane_state->shaper_tf;
1736 } else if (property == adev->mode_info.plane_lut3d_property) {
1737 *val = (dm_plane_state->lut3d) ?
1738 dm_plane_state->lut3d->base.id : 0;
1739 } else if (property == adev->mode_info.plane_blend_lut_property) {
1740 *val = (dm_plane_state->blend_lut) ?
1741 dm_plane_state->blend_lut->base.id : 0;
1742 } else if (property == adev->mode_info.plane_blend_tf_property) {
1743 *val = dm_plane_state->blend_tf;
1745 } else {
1746 return -EINVAL;
1747 }
1748
1749 return 0;
1750}
1751#endif
1752
1753static const struct drm_plane_funcs dm_plane_funcs = {
1754 .update_plane = drm_atomic_helper_update_plane,
1755 .disable_plane = drm_atomic_helper_disable_plane,
1756 .destroy = drm_plane_helper_destroy,
1757 .reset = amdgpu_dm_plane_drm_plane_reset,
1758 .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state,
1759 .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state,
1760 .format_mod_supported = amdgpu_dm_plane_format_mod_supported,
1761#ifdef AMD_PRIVATE_COLOR
1762 .atomic_set_property = dm_atomic_plane_set_property,
1763 .atomic_get_property = dm_atomic_plane_get_property,
1764#endif
1765};
1766
1767int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
1768 struct drm_plane *plane,
1769 unsigned long possible_crtcs,
1770 const struct dc_plane_cap *plane_cap)
1771{
1772 uint32_t formats[32];
1773 int num_formats;
1774 int res = -EPERM;
1775 unsigned int supported_rotations;
1776 uint64_t *modifiers = NULL;
1777 unsigned int primary_zpos = dm->dc->caps.max_slave_planes;
1778
1779 num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats,
1780 ARRAY_SIZE(formats));
1781
1782 res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers);
1783 if (res)
1784 return res;
1785
1786 if (modifiers == NULL)
1787 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
1788
1789 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
1790 &dm_plane_funcs, formats, num_formats,
1791 modifiers, plane->type, NULL);
1792 kfree(modifiers);
1793 if (res)
1794 return res;
1795
1796 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
1797 plane_cap && plane_cap->per_pixel_alpha) {
1798 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
1799 BIT(DRM_MODE_BLEND_PREMULTI) |
1800 BIT(DRM_MODE_BLEND_COVERAGE);
1801
1802 drm_plane_create_alpha_property(plane);
1803 drm_plane_create_blend_mode_property(plane, blend_caps);
1804 }
1805
1806 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
1807 /*
1808 * Allow OVERLAY planes to be used as underlays by assigning an
1809 * immutable zpos = # of OVERLAY planes to the PRIMARY plane.
1810 */
1811 drm_plane_create_zpos_immutable_property(plane, primary_zpos);
1812 } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
1813 /*
1814 * OVERLAY planes can be below or above the PRIMARY, but cannot
1815 * be above the CURSOR plane.
1816 */
1817 unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane);
1818
1819 drm_plane_create_zpos_property(plane, zpos, 0, 254);
1820 } else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
1821 drm_plane_create_zpos_immutable_property(plane, 255);
1822 }
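	/*
	 * Worked example of the resulting zpos layout (illustrative numbers,
	 * assuming max_slave_planes == 2 and the PRIMARY registered first):
	 * the PRIMARY is pinned at zpos 2, the OVERLAY planes start above it
	 * but may be moved anywhere in the 0-254 range (including below the
	 * PRIMARY, which is what makes underlay configurations possible), and
	 * the CURSOR is pinned at 255 so it always stays on top.
	 */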
1823
1824 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
1825 plane_cap &&
1826 (plane_cap->pixel_format_support.nv12 ||
1827 plane_cap->pixel_format_support.p010)) {
1828 /* This only affects YUV formats. */
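		/*
		 * These become the standard COLOR_ENCODING and COLOR_RANGE
		 * plane properties; BT.709 with limited range is the default
		 * most YUV video content expects.
		 */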
1829 drm_plane_create_color_properties(
1830 plane,
1831 BIT(DRM_COLOR_YCBCR_BT601) |
1832 BIT(DRM_COLOR_YCBCR_BT709) |
1833 BIT(DRM_COLOR_YCBCR_BT2020),
1834 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
1835 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
1836 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
1837 }
1838
1839 supported_rotations =
1840 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
1841 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
1842
1843 if (dm->adev->asic_type >= CHIP_BONAIRE &&
1844 plane->type != DRM_PLANE_TYPE_CURSOR)
1845 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
1846 supported_rotations);
1847
1848 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) &&
1849 plane->type != DRM_PLANE_TYPE_CURSOR)
1850 drm_plane_enable_fb_damage_clips(plane);
1851
1852 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
1853
1854#ifdef AMD_PRIVATE_COLOR
1855 dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
1856#endif
1857 /* Create (reset) the plane state */
1858 if (plane->funcs->reset)
1859 plane->funcs->reset(plane);
1860
1861 return 0;
1862}
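/*
 * Illustrative sketch, not part of this file: a minimal caller, loosely
 * modelled on the plane setup done in amdgpu_dm.c (the function name and
 * exact flow there may differ). It shows the contract expected by
 * amdgpu_dm_plane_init(): the caller allocates the plane, picks its type
 * before the call, and owns the allocation on failure.
 */
static inline struct drm_plane *
example_create_overlay_plane(struct amdgpu_display_manager *dm,
			     unsigned long possible_crtcs,
			     const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;

	/* kzalloc()/kfree() are available through the existing amdgpu includes. */
	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return NULL;

	plane->type = DRM_PLANE_TYPE_OVERLAY;
	if (amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap)) {
		kfree(plane);
		return NULL;
	}

	return plane;
}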
1863
1864bool amdgpu_dm_plane_is_video_format(uint32_t format)
1865{
1866 int i;
1867
1868 for (i = 0; i < ARRAY_SIZE(video_formats); i++)
1869 if (format == video_formats[i])
1870 return true;
1871
1872 return false;
1873}
1874