// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
 *
 * ARM Mali DP plane manipulation routines.
 */

#include <linux/iommu.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>

#include "malidp_hw.h"
#include "malidp_drv.h"

/* Layer specific register offsets */
#define MALIDP_LAYER_FORMAT		0x000
#define   LAYER_FORMAT_MASK		0x3f
#define MALIDP_LAYER_CONTROL		0x004
#define   LAYER_ENABLE			(1 << 0)
#define   LAYER_FLOWCFG_MASK		7
#define   LAYER_FLOWCFG(x)		(((x) & LAYER_FLOWCFG_MASK) << 1)
#define   LAYER_FLOWCFG_SCALE_SE	3
#define   LAYER_ROT_OFFSET		8
#define   LAYER_H_FLIP			(1 << 10)
#define   LAYER_V_FLIP			(1 << 11)
#define   LAYER_ROT_MASK		(0xf << 8)
#define   LAYER_COMP_MASK		(0x3 << 12)
#define   LAYER_COMP_PIXEL		(0x3 << 12)
#define   LAYER_COMP_PLANE		(0x2 << 12)
#define   LAYER_PMUL_ENABLE		(0x1 << 14)
#define   LAYER_ALPHA_OFFSET		(16)
#define   LAYER_ALPHA_MASK		(0xff)
#define   LAYER_ALPHA(x)		(((x) & LAYER_ALPHA_MASK) << LAYER_ALPHA_OFFSET)
#define MALIDP_LAYER_COMPOSE		0x008
#define MALIDP_LAYER_SIZE		0x00c
#define   LAYER_H_VAL(x)		(((x) & 0x1fff) << 0)
#define   LAYER_V_VAL(x)		(((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE		0x010
#define MALIDP_LAYER_OFFSET		0x014
#define MALIDP550_LS_ENABLE		0x01c
#define MALIDP550_LS_R1_IN_SIZE		0x020
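/*
 * Illustrative example of the size packing above: a 1920x1080 source size
 * would be programmed as LAYER_H_VAL(1920) | LAYER_V_VAL(1080) == 0x04380780.
 */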

#define MODIFIERS_COUNT_MAX		15

/*
 * This 4-entry look-up-table is used to determine the full 8-bit alpha value
 * for formats with 1- or 2-bit alpha channels.
 * We set it to give 100%/0% opacity for 1-bit formats and 100%/66%/33%/0%
 * opacity for 2-bit formats.
 */
#define MALIDP_ALPHA_LUT	0xffaa5500
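/*
 * Read byte by byte, the value above gives entry 0 -> 0x00, entry 1 -> 0x55,
 * entry 2 -> 0xaa and entry 3 -> 0xff, matching the 0%/33%/66%/100% opacity
 * steps described in the comment (assuming the hardware indexes the LUT by
 * the raw alpha value, lowest byte first).
 */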

/* page sizes the MMU prefetcher can support */
#define MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES	(SZ_4K | SZ_64K)
#define MALIDP_MMU_PREFETCH_FULL_PGSIZES	(SZ_1M | SZ_2M)
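/*
 * For example (illustrative), an IOMMU that advertises 4K and 2M pages would
 * allow 4K pages for partial-frame prefetch and 2M pages for full-frame
 * prefetch.
 */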

/* readahead for partial-frame prefetch */
#define MALIDP_MMU_PREFETCH_READAHEAD	8

static void malidp_de_plane_destroy(struct drm_plane *plane)
{
        struct malidp_plane *mp = to_malidp_plane(plane);

        drm_plane_cleanup(plane);
        kfree(mp);
}

/*
 * Replicate what the default ->reset hook does: free the state pointer and
 * allocate a new empty object. We just need enough space to store
 * a malidp_plane_state instead of a drm_plane_state.
 */
static void malidp_plane_reset(struct drm_plane *plane)
{
        struct malidp_plane_state *state = to_malidp_plane_state(plane->state);

        if (state)
                __drm_atomic_helper_plane_destroy_state(&state->base);
        kfree(state);
        plane->state = NULL;
        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (state)
                __drm_atomic_helper_plane_reset(plane, &state->base);
}

static struct
drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
        struct malidp_plane_state *state, *m_state;

        if (!plane->state)
                return NULL;

        state = kmalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        m_state = to_malidp_plane_state(plane->state);
        __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
        state->rotmem_size = m_state->rotmem_size;
        state->format = m_state->format;
        state->n_planes = m_state->n_planes;

        state->mmu_prefetch_mode = m_state->mmu_prefetch_mode;
        state->mmu_prefetch_pgsize = m_state->mmu_prefetch_pgsize;

        return &state->base;
}

static void malidp_destroy_plane_state(struct drm_plane *plane,
                struct drm_plane_state *state)
{
        struct malidp_plane_state *m_state = to_malidp_plane_state(state);

        __drm_atomic_helper_plane_destroy_state(state);
        kfree(m_state);
}

static const char * const prefetch_mode_names[] = {
        [MALIDP_PREFETCH_MODE_NONE] = "MMU_PREFETCH_NONE",
        [MALIDP_PREFETCH_MODE_PARTIAL] = "MMU_PREFETCH_PARTIAL",
        [MALIDP_PREFETCH_MODE_FULL] = "MMU_PREFETCH_FULL",
};

static void malidp_plane_atomic_print_state(struct drm_printer *p,
                const struct drm_plane_state *state)
{
        struct malidp_plane_state *ms = to_malidp_plane_state(state);

        drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size);
        drm_printf(p, "\tformat_id=%u\n", ms->format);
        drm_printf(p, "\tn_planes=%u\n", ms->n_planes);
        drm_printf(p, "\tmmu_prefetch_mode=%s\n",
                prefetch_mode_names[ms->mmu_prefetch_mode]);
        drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
}

bool malidp_format_mod_supported(struct drm_device *drm,
                u32 format, u64 modifier)
{
        const struct drm_format_info *info;
        const u64 *modifiers;
        struct malidp_drm *malidp = drm->dev_private;
        const struct malidp_hw_regmap *map = &malidp->dev->hw->map;

        if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
                return false;

        /*
         * Most pixel formats are supported without any modifier, but some
         * are AFBC-only and cannot be used with a linear layout.
         */
        if (modifier == DRM_FORMAT_MOD_LINEAR)
                return !malidp_hw_format_is_afbc_only(format);

        if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
                DRM_ERROR("Unknown modifier (not Arm)\n");
                return false;
        }

        if (modifier &
                        ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
                DRM_DEBUG_KMS("Unsupported modifiers\n");
                return false;
        }

        modifiers = malidp_format_modifiers;

        /* SPLIT buffers must use SPARSE layout */
        if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE)))
                return false;

        /* CBR only applies to YUV formats, where YTR should always be 0 */
        if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR)))
                return false;

        while (*modifiers != DRM_FORMAT_MOD_INVALID) {
                if (*modifiers == modifier)
                        break;

                modifiers++;
        }

        /* return false if the modifier was not found */
        if (*modifiers == DRM_FORMAT_MOD_INVALID) {
                DRM_DEBUG_KMS("Unsupported modifier\n");
                return false;
        }

        info = drm_format_info(format);

        if (info->num_planes != 1) {
                DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
                return false;
        }

        if (malidp_hw_format_is_linear_only(format)) {
                DRM_DEBUG_KMS("Given format (0x%x) is supported in linear mode only\n",
                        format);
                return false;
        }

        /*
         * RGB formats must set the YTR modifier bit, while YUV formats must
         * not set it.
         */
        if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) {
                DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n",
                        info->is_yuv ? "disallowed" : "mandatory",
                        info->is_yuv ? "YUV" : "RGB");
                return false;
        }

        if (modifier & AFBC_SPLIT) {
                if (!info->is_yuv) {
                        if (info->cpp[0] <= 2) {
                                DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n");
                                return false;
                        }
                }

                if ((info->hsub != 1) || (info->vsub != 1)) {
                        if (!(format == DRM_FORMAT_YUV420_10BIT &&
                              (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) {
                                DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n");
                                return false;
                        }
                }
        }

        if (modifier & AFBC_CBR) {
                if ((info->hsub == 1) || (info->vsub == 1)) {
                        DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n");
                        return false;
                }
        }

        return true;
}
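
/*
 * Illustrative example of a combination accepted by the checks above
 * (assuming the modifier is listed in malidp_format_modifiers and the format
 * is not restricted to linear by malidp_hw_format_is_linear_only()):
 *
 *   malidp_format_mod_supported(drm, DRM_FORMAT_ABGR8888,
 *           DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
 *                                   AFBC_FORMAT_MOD_YTR |
 *                                   AFBC_FORMAT_MOD_SPARSE));
 *
 * ABGR8888 is a single-plane RGB format, so YTR is mandatory, and neither
 * SPLIT nor CBR is requested.
 */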

static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane,
                u32 format, u64 modifier)
{
        return malidp_format_mod_supported(plane->dev, format, modifier);
}

static const struct drm_plane_funcs malidp_de_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = malidp_de_plane_destroy,
        .reset = malidp_plane_reset,
        .atomic_duplicate_state = malidp_duplicate_plane_state,
        .atomic_destroy_state = malidp_destroy_plane_state,
        .atomic_print_state = malidp_plane_atomic_print_state,
        .format_mod_supported = malidp_format_mod_supported_per_plane,
};

static int malidp_se_check_scaling(struct malidp_plane *mp,
                struct drm_plane_state *state)
{
        struct drm_crtc_state *crtc_state =
                drm_atomic_get_existing_crtc_state(state->state, state->crtc);
        struct malidp_crtc_state *mc;
        u32 src_w, src_h;
        int ret;

        if (!crtc_state)
                return -EINVAL;

        mc = to_malidp_crtc_state(crtc_state);

        ret = drm_atomic_helper_check_plane_state(state, crtc_state,
                        0, INT_MAX, true, true);
        if (ret)
                return ret;

        if (state->rotation & MALIDP_ROTATED_MASK) {
                src_w = state->src_h >> 16;
                src_h = state->src_w >> 16;
        } else {
                src_w = state->src_w >> 16;
                src_h = state->src_h >> 16;
        }

        if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) {
                /* Scaling not necessary for this plane. */
                mc->scaled_planes_mask &= ~(mp->layer->id);
                return 0;
        }

        if (mp->layer->id & (DE_SMART | DE_GRAPHICS2))
                return -EINVAL;

        mc->scaled_planes_mask |= mp->layer->id;
        /* Defer scaling requirements calculation to the crtc check. */
        return 0;
}
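
/*
 * Example (illustrative): a 1280x720 source displayed at 1920x1080 on one of
 * the video layers passes the check above and sets that layer's id bit in
 * scaled_planes_mask; the same request on the SMART or GRAPHICS2 layer is
 * rejected with -EINVAL, since those layers cannot be scaled.
 */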

static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp)
{
        u32 pgsize_bitmap = 0;

        if (iommu_present(&platform_bus_type)) {
                struct iommu_domain *mmu_dom =
                        iommu_get_domain_for_dev(mp->base.dev->dev);

                if (mmu_dom)
                        pgsize_bitmap = mmu_dom->pgsize_bitmap;
        }

        return pgsize_bitmap;
}

/*
 * Check if the framebuffer is entirely made up of pages at least pgsize in
 * size. Only a heuristic: assumes that each scatterlist entry has been aligned
 * to the largest page size smaller than its length and that the MMU maps to
 * the largest page size possible.
 */
static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
                u32 pgsize)
{
        int i;

        for (i = 0; i < ms->n_planes; i++) {
                struct drm_gem_object *obj;
                struct drm_gem_cma_object *cma_obj;
                struct sg_table *sgt;
                struct scatterlist *sgl;

                obj = drm_gem_fb_get_obj(ms->base.fb, i);
                cma_obj = to_drm_gem_cma_obj(obj);

                if (cma_obj->sgt)
                        sgt = cma_obj->sgt;
                else
                        sgt = obj->funcs->get_sg_table(obj);

                if (!sgt)
                        return false;

                sgl = sgt->sgl;

                while (sgl) {
                        if (sgl->length < pgsize) {
                                if (!cma_obj->sgt)
                                        kfree(sgt);
                                return false;
                        }

                        sgl = sg_next(sgl);
                }
                if (!cma_obj->sgt)
                        kfree(sgt);
        }

        return true;
}

/*
 * Check if it is possible to enable partial-frame MMU prefetch given the
 * current format, AFBC state and rotation.
 */
static bool malidp_partial_prefetch_supported(u32 format, u64 modifier,
                unsigned int rotation)
{
        bool afbc, sparse;

        /* rotation and horizontal flip not supported for partial prefetch */
        if (rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
                        DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X))
                return false;

        afbc = modifier & DRM_FORMAT_MOD_ARM_AFBC(0);
        sparse = modifier & AFBC_FORMAT_MOD_SPARSE;

        switch (format) {
        case DRM_FORMAT_ARGB2101010:
        case DRM_FORMAT_RGBA1010102:
        case DRM_FORMAT_BGRA1010102:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_RGBA8888:
        case DRM_FORMAT_BGRA8888:
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_RGBX8888:
        case DRM_FORMAT_BGRX8888:
        case DRM_FORMAT_RGB888:
        case DRM_FORMAT_RGBA5551:
        case DRM_FORMAT_RGB565:
                /* always supported */
                return true;

        case DRM_FORMAT_ABGR2101010:
        case DRM_FORMAT_ABGR8888:
        case DRM_FORMAT_ABGR1555:
        case DRM_FORMAT_BGR565:
                /* supported, but if AFBC then must be sparse mode */
                return (!afbc) || (afbc && sparse);

        case DRM_FORMAT_BGR888:
                /* supported, but not for AFBC */
                return !afbc;

        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_NV12:
        case DRM_FORMAT_YUV420:
                /* not supported */
                return false;

        default:
                return false;
        }
}

/*
 * Select the preferred MMU prefetch mode. Full-frame prefetch is preferred as
 * long as the framebuffer is all large pages. Otherwise partial-frame prefetch
 * is selected as long as it is supported for the current format. The selected
 * page size for prefetch is returned in pgsize_bitmap.
 */
static enum mmu_prefetch_mode malidp_mmu_prefetch_select_mode
        (struct malidp_plane_state *ms, u32 *pgsize_bitmap)
{
        u32 pgsizes;

        /* get the full-frame prefetch page size(s) supported by the MMU */
        pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_FULL_PGSIZES;

        while (pgsizes) {
                u32 largest_pgsize = 1 << __fls(pgsizes);

                if (malidp_check_pages_threshold(ms, largest_pgsize)) {
                        *pgsize_bitmap = largest_pgsize;
                        return MALIDP_PREFETCH_MODE_FULL;
                }

                pgsizes -= largest_pgsize;
        }

        /* get the partial-frame prefetch page size(s) supported by the MMU */
        pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES;

        if (malidp_partial_prefetch_supported(ms->base.fb->format->format,
                        ms->base.fb->modifier,
                        ms->base.rotation)) {
                /* partial prefetch using the smallest page size */
                *pgsize_bitmap = 1 << __ffs(pgsizes);
                return MALIDP_PREFETCH_MODE_PARTIAL;
        }
        *pgsize_bitmap = 0;
        return MALIDP_PREFETCH_MODE_NONE;
}
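
/*
 * Example (illustrative): if the MMU supports 2M pages and every scatterlist
 * entry backing the framebuffer is at least 2M long, full-frame prefetch with
 * 2M pages is selected; otherwise, for a format/rotation combination that
 * malidp_partial_prefetch_supported() accepts, partial-frame prefetch falls
 * back to the smallest page size left in the partial set.
 */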

static u32 malidp_calc_mmu_control_value(enum mmu_prefetch_mode mode,
                u8 readahead, u8 n_planes, u32 pgsize)
{
        u32 mmu_ctrl = 0;

        if (mode != MALIDP_PREFETCH_MODE_NONE) {
                mmu_ctrl |= MALIDP_MMU_CTRL_EN;

                if (mode == MALIDP_PREFETCH_MODE_PARTIAL) {
                        mmu_ctrl |= MALIDP_MMU_CTRL_MODE;
                        mmu_ctrl |= MALIDP_MMU_CTRL_PP_NUM_REQ(readahead);
                }

                if (pgsize == SZ_64K || pgsize == SZ_2M) {
                        int i;

                        for (i = 0; i < n_planes; i++)
                                mmu_ctrl |= MALIDP_MMU_CTRL_PX_PS(i);
                }
        }

        return mmu_ctrl;
}
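
/*
 * Worked example, derived from the function above: partial prefetch of a
 * 2-plane buffer with 64K pages and the default readahead of 8 yields
 *   MALIDP_MMU_CTRL_EN | MALIDP_MMU_CTRL_MODE |
 *   MALIDP_MMU_CTRL_PP_NUM_REQ(8) |
 *   MALIDP_MMU_CTRL_PX_PS(0) | MALIDP_MMU_CTRL_PX_PS(1)
 */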

static void malidp_de_prefetch_settings(struct malidp_plane *mp,
                struct malidp_plane_state *ms)
{
        if (!mp->layer->mmu_ctrl_offset)
                return;

        /* get the page sizes supported by the MMU */
        ms->mmu_prefetch_pgsize = malidp_get_pgsize_bitmap(mp);
        ms->mmu_prefetch_mode =
                malidp_mmu_prefetch_select_mode(ms, &ms->mmu_prefetch_pgsize);
}

static int malidp_de_plane_check(struct drm_plane *plane,
                struct drm_atomic_state *state)
{
        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
                        plane);
        struct malidp_plane *mp = to_malidp_plane(plane);
        struct malidp_plane_state *ms = to_malidp_plane_state(new_plane_state);
        bool rotated = new_plane_state->rotation & MALIDP_ROTATED_MASK;
        struct drm_framebuffer *fb;
        u16 pixel_alpha = new_plane_state->pixel_blend_mode;
        int i, ret;
        unsigned int block_w, block_h;

        if (!new_plane_state->crtc || WARN_ON(!new_plane_state->fb))
                return 0;

        fb = new_plane_state->fb;

        ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
                        mp->layer->id, fb->format->format,
                        !!fb->modifier);
        if (ms->format == MALIDP_INVALID_FORMAT_ID)
                return -EINVAL;

        ms->n_planes = fb->format->num_planes;
        for (i = 0; i < ms->n_planes; i++) {
                u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);

                if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
                                & (alignment - 1)) && !(fb->modifier)) {
                        DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
                                fb->pitches[i], i);
                        return -EINVAL;
                }
        }

        block_w = drm_format_info_block_width(fb->format, 0);
        block_h = drm_format_info_block_height(fb->format, 0);
        if (fb->width % block_w || fb->height % block_h) {
                DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes");
                return -EINVAL;
        }
        if ((new_plane_state->src_x >> 16) % block_w || (new_plane_state->src_y >> 16) % block_h) {
                DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes");
                return -EINVAL;
        }

        if ((new_plane_state->crtc_w > mp->hwdev->max_line_size) ||
            (new_plane_state->crtc_h > mp->hwdev->max_line_size) ||
            (new_plane_state->crtc_w < mp->hwdev->min_line_size) ||
            (new_plane_state->crtc_h < mp->hwdev->min_line_size))
                return -EINVAL;

        /*
         * DP550/650 video layers can accept 3 plane formats only if
         * fb->pitches[1] == fb->pitches[2] since they don't have a
         * third plane stride register.
         */
        if (ms->n_planes == 3 &&
            !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
            (new_plane_state->fb->pitches[1] != new_plane_state->fb->pitches[2]))
                return -EINVAL;

        ret = malidp_se_check_scaling(mp, new_plane_state);
        if (ret)
                return ret;

        /* validate the rotation constraints for each layer */
        if (new_plane_state->rotation != DRM_MODE_ROTATE_0) {
                if (mp->layer->rot == ROTATE_NONE)
                        return -EINVAL;
                if ((mp->layer->rot == ROTATE_COMPRESSED) && !(fb->modifier))
                        return -EINVAL;
                /*
                 * packed RGB888 / BGR888 can't be rotated or flipped
                 * unless they are stored in a compressed way
                 */
                if ((fb->format->format == DRM_FORMAT_RGB888 ||
                     fb->format->format == DRM_FORMAT_BGR888) && !(fb->modifier))
                        return -EINVAL;
        }

        /* SMART layer does not support AFBC */
        if (mp->layer->id == DE_SMART && fb->modifier) {
                DRM_ERROR("AFBC framebuffer not supported in SMART layer");
                return -EINVAL;
        }

        ms->rotmem_size = 0;
        if (new_plane_state->rotation & MALIDP_ROTATED_MASK) {
                int val;

                val = mp->hwdev->hw->rotmem_required(mp->hwdev, new_plane_state->crtc_w,
                                new_plane_state->crtc_h,
                                fb->format->format,
                                !!(fb->modifier));
                if (val < 0)
                        return val;

                ms->rotmem_size = val;
        }

        /* HW can't support plane + pixel blending */
        if ((new_plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE) &&
            (pixel_alpha != DRM_MODE_BLEND_PIXEL_NONE) &&
            fb->format->has_alpha)
                return -EINVAL;

        malidp_de_prefetch_settings(mp, ms);

        return 0;
}

static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
                int num_planes, unsigned int pitches[3])
{
        int i;
        int num_strides = num_planes;

        if (!mp->layer->stride_offset)
                return;

        if (num_planes == 3)
                num_strides = (mp->hwdev->hw->features &
                               MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;

        /*
         * The drm convention for pitch is that it needs to cover width * cpp,
         * but our hardware wants the pitch/stride to cover all rows included
         * in a tile.
         */
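        /*
         * Example (illustrative): a linear XRGB8888 buffer has a block height
         * of 1, so the DRM pitch is programmed unchanged, while a format such
         * as DRM_FORMAT_X0L2 (assuming 2x2 pixel blocks) would be programmed
         * with pitches[i] * 2.
         */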
        for (i = 0; i < num_strides; ++i) {
                unsigned int block_h = drm_format_info_block_height(mp->base.state->fb->format, i);

                malidp_hw_write(mp->hwdev, pitches[i] * block_h,
                        mp->layer->base +
                        mp->layer->stride_offset + i * 4);
        }
}

static const s16
malidp_yuv2rgb_coeffs[][DRM_COLOR_RANGE_MAX][MALIDP_COLORADJ_NUM_COEFFS] = {
        [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
                1192, 0, 1634,
                1192, -401, -832,
                1192, 2066, 0,
                64, 512, 512
        },
        [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
                1024, 0, 1436,
                1024, -352, -731,
                1024, 1815, 0,
                0, 512, 512
        },
        [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
                1192, 0, 1836,
                1192, -218, -546,
                1192, 2163, 0,
                64, 512, 512
        },
        [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
                1024, 0, 1613,
                1024, -192, -479,
                1024, 1900, 0,
                0, 512, 512
        },
        [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
                1024, 0, 1476,
                1024, -165, -572,
                1024, 1884, 0,
                0, 512, 512
        },
        [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_FULL_RANGE] = {
                1024, 0, 1510,
                1024, -168, -585,
                1024, 1927, 0,
                0, 512, 512
        }
};
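
/*
 * The coefficients above appear to be signed Q10 fixed-point values
 * (1024 == 1.0) for a 10-bit pipeline: e.g. the BT601 limited-range luma
 * scale 1192/1024 is roughly 1.164, and the last row looks like the Y/Cb/Cr
 * input offsets (64 == 16 << 2 for limited-range luma, 512 is the chroma
 * mid-point). This is an inference from the values, not a statement of the
 * register layout.
 */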

static void malidp_de_set_color_encoding(struct malidp_plane *plane,
                enum drm_color_encoding enc,
                enum drm_color_range range)
{
        unsigned int i;

        for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
                /* coefficients are signed, two's complement values */
                malidp_hw_write(plane->hwdev, malidp_yuv2rgb_coeffs[enc][range][i],
                        plane->layer->base + plane->layer->yuv2rgb_offset +
                        i * 4);
        }
}

static void malidp_de_set_mmu_control(struct malidp_plane *mp,
                struct malidp_plane_state *ms)
{
        u32 mmu_ctrl;

        /* check hardware supports MMU prefetch */
        if (!mp->layer->mmu_ctrl_offset)
                return;

        mmu_ctrl = malidp_calc_mmu_control_value(ms->mmu_prefetch_mode,
                        MALIDP_MMU_PREFETCH_READAHEAD,
                        ms->n_planes,
                        ms->mmu_prefetch_pgsize);

        malidp_hw_write(mp->hwdev, mmu_ctrl,
                        mp->layer->base + mp->layer->mmu_ctrl_offset);
}

static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
                struct malidp_plane *mp,
                int plane_index)
{
        dma_addr_t paddr;
        u16 ptr;
        struct drm_plane *plane = &mp->base;
        bool afbc = fb->modifier ? true : false;

        ptr = mp->layer->ptr + (plane_index << 4);
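
        /*
         * Each plane's pointer registers are 16 bytes apart (plane_index << 4);
         * the low 32 bits of the address go to 'ptr' and the high 32 bits to
         * 'ptr + 4' (see the two writes at the end of this function).
         */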

        /*
         * drm_fb_cma_get_gem_addr() alters the physical base address of the
         * framebuffer as per the plane's src_x, src_y co-ordinates (i.e. to
         * take care of source cropping).
         * For AFBC, this is not needed as the cropping is handled by the
         * _AD_CROP_H and _AD_CROP_V registers.
         */
        if (!afbc) {
                paddr = drm_fb_cma_get_gem_addr(fb, plane->state,
                                plane_index);
        } else {
                struct drm_gem_cma_object *obj;

                obj = drm_fb_cma_get_gem_obj(fb, plane_index);

                if (WARN_ON(!obj))
                        return;
                paddr = obj->paddr;
        }

        malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr);
        malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4);
}

static void malidp_de_set_plane_afbc(struct drm_plane *plane)
{
        struct malidp_plane *mp;
        u32 src_w, src_h, val = 0, src_x, src_y;
        struct drm_framebuffer *fb = plane->state->fb;

        mp = to_malidp_plane(plane);

        /* no afbc_decoder_offset means AFBC is not supported on this plane */
        if (!mp->layer->afbc_decoder_offset)
                return;

        if (!fb->modifier) {
                malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset);
                return;
        }

        /* convert src values from Q16 fixed point to integer */
        src_w = plane->state->src_w >> 16;
        src_h = plane->state->src_h >> 16;
        src_x = plane->state->src_x >> 16;
        src_y = plane->state->src_y >> 16;

        val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) |
              src_x;
        malidp_hw_write(mp->hwdev, val,
                        mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H);

        val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) |
              src_y;
        malidp_hw_write(mp->hwdev, val,
                        mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V);

        val = MALIDP_AD_EN;
        if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
                val |= MALIDP_AD_BS;
        if (fb->modifier & AFBC_FORMAT_MOD_YTR)
                val |= MALIDP_AD_YTR;

        malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset);
}

static void malidp_de_plane_update(struct drm_plane *plane,
                struct drm_atomic_state *state)
{
        struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
                        plane);
        struct malidp_plane *mp;
        struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                        plane);
        u16 pixel_alpha = new_state->pixel_blend_mode;
        u8 plane_alpha = new_state->alpha >> 8;
        u32 src_w, src_h, dest_w, dest_h, val;
        int i;
        struct drm_framebuffer *fb = plane->state->fb;

        mp = to_malidp_plane(plane);

        /*
         * For an AFBC framebuffer, use the framebuffer width and height when
         * configuring the layer input size register.
         */
        if (fb->modifier) {
                src_w = fb->width;
                src_h = fb->height;
        } else {
                /* convert src values from Q16 fixed point to integer */
                src_w = new_state->src_w >> 16;
                src_h = new_state->src_h >> 16;
        }

        dest_w = new_state->crtc_w;
        dest_h = new_state->crtc_h;

        val = malidp_hw_read(mp->hwdev, mp->layer->base);
        val = (val & ~LAYER_FORMAT_MASK) | ms->format;
        malidp_hw_write(mp->hwdev, val, mp->layer->base);

        for (i = 0; i < ms->n_planes; i++)
                malidp_set_plane_base_addr(fb, mp, i);

        malidp_de_set_mmu_control(mp, ms);

        malidp_de_set_plane_pitches(mp, ms->n_planes,
                        new_state->fb->pitches);

        if ((plane->state->color_encoding != old_state->color_encoding) ||
            (plane->state->color_range != old_state->color_range))
                malidp_de_set_color_encoding(mp, plane->state->color_encoding,
                                plane->state->color_range);

        malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
                        mp->layer->base + MALIDP_LAYER_SIZE);

        malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
                        mp->layer->base + MALIDP_LAYER_COMP_SIZE);

        malidp_hw_write(mp->hwdev, LAYER_H_VAL(new_state->crtc_x) |
                        LAYER_V_VAL(new_state->crtc_y),
                        mp->layer->base + MALIDP_LAYER_OFFSET);

        if (mp->layer->id == DE_SMART) {
                /*
                 * Enable the first rectangle in the SMART layer to be
                 * able to use it as a drm plane.
                 */
                malidp_hw_write(mp->hwdev, 1,
                                mp->layer->base + MALIDP550_LS_ENABLE);
                malidp_hw_write(mp->hwdev,
                                LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
                                mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
        }

        malidp_de_set_plane_afbc(plane);

        /* first clear the rotation bits */
        val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
        val &= ~LAYER_ROT_MASK;

        /* setup the rotation and axis flip bits */
        if (new_state->rotation & DRM_MODE_ROTATE_MASK)
                val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) <<
                       LAYER_ROT_OFFSET;
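        /*
         * DRM_MODE_ROTATE_0/90/180/270 are BIT(0)..BIT(3), so the ilog2()
         * above yields 0..3; the hardware field therefore appears to encode
         * the rotation as the number of 90 degree steps.
         */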
        if (new_state->rotation & DRM_MODE_REFLECT_X)
                val |= LAYER_H_FLIP;
        if (new_state->rotation & DRM_MODE_REFLECT_Y)
                val |= LAYER_V_FLIP;

        val &= ~(LAYER_COMP_MASK | LAYER_PMUL_ENABLE | LAYER_ALPHA(0xff));

        if (new_state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
                val |= LAYER_COMP_PLANE;
        } else if (new_state->fb->format->has_alpha) {
                /* We only care about blend mode if the format has alpha */
                switch (pixel_alpha) {
                case DRM_MODE_BLEND_PREMULTI:
                        val |= LAYER_COMP_PIXEL | LAYER_PMUL_ENABLE;
                        break;
                case DRM_MODE_BLEND_COVERAGE:
                        val |= LAYER_COMP_PIXEL;
                        break;
                }
        }
        val |= LAYER_ALPHA(plane_alpha);

        val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
        if (new_state->crtc) {
                struct malidp_crtc_state *m =
                        to_malidp_crtc_state(new_state->crtc->state);

                if (m->scaler_config.scale_enable &&
                    m->scaler_config.plane_src_id == mp->layer->id)
                        val |= LAYER_FLOWCFG(LAYER_FLOWCFG_SCALE_SE);
        }

        /* set the 'enable layer' bit */
        val |= LAYER_ENABLE;

        malidp_hw_write(mp->hwdev, val,
                        mp->layer->base + MALIDP_LAYER_CONTROL);
}

static void malidp_de_plane_disable(struct drm_plane *plane,
                struct drm_atomic_state *state)
{
        struct malidp_plane *mp = to_malidp_plane(plane);

        malidp_hw_clearbits(mp->hwdev,
                        LAYER_ENABLE | LAYER_FLOWCFG(LAYER_FLOWCFG_MASK),
                        mp->layer->base + MALIDP_LAYER_CONTROL);
}

static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
        .atomic_check = malidp_de_plane_check,
        .atomic_update = malidp_de_plane_update,
        .atomic_disable = malidp_de_plane_disable,
};

static const uint64_t linear_only_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};

int malidp_de_planes_init(struct drm_device *drm)
{
        struct malidp_drm *malidp = drm->dev_private;
        const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
        struct malidp_plane *plane = NULL;
        enum drm_plane_type plane_type;
        unsigned long crtcs = BIT(drm->mode_config.num_crtc);
        unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
                              DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
        unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
                                  BIT(DRM_MODE_BLEND_PREMULTI) |
                                  BIT(DRM_MODE_BLEND_COVERAGE);
        u32 *formats;
        int ret, i = 0, j = 0, n;
        u64 supported_modifiers[MODIFIERS_COUNT_MAX];
        const u64 *modifiers;

        modifiers = malidp_format_modifiers;

        if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) {
                /*
                 * The hardware does not support SPLIT, so build the list of
                 * supported modifiers excluding the SPLIT ones.
                 */
                while (*modifiers != DRM_FORMAT_MOD_INVALID) {
                        if (!(*modifiers & AFBC_SPLIT))
                                supported_modifiers[j++] = *modifiers;

                        modifiers++;
                }
                supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID;
                modifiers = supported_modifiers;
        }

        formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
        if (!formats) {
                ret = -ENOMEM;
                goto cleanup;
        }

        for (i = 0; i < map->n_layers; i++) {
                u8 id = map->layers[i].id;

                plane = kzalloc(sizeof(*plane), GFP_KERNEL);
                if (!plane) {
                        ret = -ENOMEM;
                        goto cleanup;
                }

                /* build the list of DRM supported formats based on the map */
                for (n = 0, j = 0; j < map->n_pixel_formats; j++) {
                        if ((map->pixel_formats[j].layer & id) == id)
                                formats[n++] = map->pixel_formats[j].format;
                }

                plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
                                        DRM_PLANE_TYPE_OVERLAY;

                /*
                 * All layers except the SMART layer support AFBC modifiers.
                 */
                ret = drm_universal_plane_init(drm, &plane->base, crtcs,
                                &malidp_de_plane_funcs, formats, n,
                                (id == DE_SMART) ? linear_only_modifiers : modifiers,
                                plane_type, NULL);

                if (ret < 0)
                        goto cleanup;

                drm_plane_helper_add(&plane->base,
                                &malidp_de_plane_helper_funcs);
                plane->hwdev = malidp->dev;
                plane->layer = &map->layers[i];

                drm_plane_create_alpha_property(&plane->base);
                drm_plane_create_blend_mode_property(&plane->base, blend_caps);

                if (id == DE_SMART) {
                        /* Skip the features which the SMART layer doesn't have. */
                        continue;
                }

                drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, flags);
                malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
                                plane->layer->base + MALIDP_LAYER_COMPOSE);

                /* Attach the YUV->RGB property only to video layers */
                if (id & (DE_VIDEO1 | DE_VIDEO2)) {
                        /* default encoding for YUV->RGB is BT601 NARROW */
                        enum drm_color_encoding enc = DRM_COLOR_YCBCR_BT601;
                        enum drm_color_range range = DRM_COLOR_YCBCR_LIMITED_RANGE;

                        ret = drm_plane_create_color_properties(&plane->base,
                                        BIT(DRM_COLOR_YCBCR_BT601) | \
                                        BIT(DRM_COLOR_YCBCR_BT709) | \
                                        BIT(DRM_COLOR_YCBCR_BT2020),
                                        BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | \
                                        BIT(DRM_COLOR_YCBCR_FULL_RANGE),
                                        enc, range);
                        if (!ret)
                                /* program the HW registers */
                                malidp_de_set_color_encoding(plane, enc, range);
                        else
                                DRM_WARN("Failed to create video layer %d color properties\n", id);
                }
        }

        kfree(formats);

        return 0;

cleanup:
        kfree(formats);

        return ret;
}