// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Free Electrons
 * Copyright (C) 2015 NextThing Co
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/component.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>

#include "sun4i_backend.h"
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"

struct sun4i_backend_quirks {
        /* backend <-> TCON muxing selection done in backend */
        bool needs_output_muxing;

        /* alpha at the lowest z position is not always supported */
        bool supports_lowest_plane_alpha;
};

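/*
 * Coefficients used by sun4i_backend_apply_color_correction() for the
 * RGB to YUV output colour correction.
 */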
static const u32 sunxi_rgb2yuv_coef[12] = {
        0x00000107, 0x00000204, 0x00000064, 0x00000108,
        0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
        0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};

static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
        int i;

        DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");

        /* Set color correction */
        regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG,
                     SUN4I_BACKEND_OCCTL_ENABLE);

        for (i = 0; i < 12; i++)
                regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
                             sunxi_rgb2yuv_coef[i]);
}

static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine)
{
        DRM_DEBUG_DRIVER("Disabling color correction\n");

        /* Disable color correction */
        regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG,
                           SUN4I_BACKEND_OCCTL_ENABLE, 0);
}

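/*
 * Latch the queued register values: writing LOADCTL makes the hardware
 * pick up the new configuration at the next vblank, while keeping the
 * automatic register reloading disabled so updates only happen on an
 * explicit commit.
 */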
static void sun4i_backend_commit(struct sunxi_engine *engine)
{
        DRM_DEBUG_DRIVER("Committing changes\n");

        regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
                     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
                     SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
}

void sun4i_backend_layer_enable(struct sun4i_backend *backend,
                                int layer, bool enable)
{
        u32 val;

        DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis",
                         layer);

        if (enable)
                val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
        else
                val = 0;

        regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
                           SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
}

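/*
 * Translate a DRM fourcc into the value of the layer's framebuffer
 * format field. Only RGB formats are handled here; packed YUV input is
 * programmed separately through the IYUV registers.
 */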
static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
{
        switch (format) {
        case DRM_FORMAT_ARGB8888:
                *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
                break;

        case DRM_FORMAT_ARGB4444:
                *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
                break;

        case DRM_FORMAT_ARGB1555:
                *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
                break;

        case DRM_FORMAT_RGBA5551:
                *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
                break;

        case DRM_FORMAT_RGBA4444:
                *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
                break;

        case DRM_FORMAT_XRGB8888:
                *mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
                break;

        case DRM_FORMAT_RGB888:
                *mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
                break;

        case DRM_FORMAT_RGB565:
                *mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static const uint32_t sun4i_backend_formats[] = {
        DRM_FORMAT_ARGB1555,
        DRM_FORMAT_ARGB4444,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_RGB888,
        DRM_FORMAT_RGBA4444,
        DRM_FORMAT_RGBA5551,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
};

bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
{
        unsigned int i;

        if (modifier != DRM_FORMAT_MOD_LINEAR)
                return false;

        for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
                if (sun4i_backend_formats[i] == fmt)
                        return true;

        return false;
}

int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
                                     int layer, struct drm_plane *plane)
{
        struct drm_plane_state *state = plane->state;

        DRM_DEBUG_DRIVER("Updating layer %d\n", layer);

        if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
                DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n",
                                 state->crtc_w, state->crtc_h);
                regmap_write(backend->engine.regs, SUN4I_BACKEND_DISSIZE_REG,
                             SUN4I_BACKEND_DISSIZE(state->crtc_w,
                                                   state->crtc_h));
        }

        /* Set height and width */
        DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
                         state->crtc_w, state->crtc_h);
        regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
                     SUN4I_BACKEND_LAYSIZE(state->crtc_w,
                                           state->crtc_h));

        /* Set base coordinates */
        DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
                         state->crtc_x, state->crtc_y);
        regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
                     SUN4I_BACKEND_LAYCOOR(state->crtc_x,
                                           state->crtc_y));

        return 0;
}

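/*
 * Program the input YUV path for a layer: load the BT.601 YUV to RGB
 * coefficients, mark the layer as taking YUV input and describe the
 * packed pixel sequence of the framebuffer.
 */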
static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
                                           int layer, struct drm_plane *plane)
{
        struct drm_plane_state *state = plane->state;
        struct drm_framebuffer *fb = state->fb;
        const struct drm_format_info *format = fb->format;
        const uint32_t fmt = format->format;
        u32 val = SUN4I_BACKEND_IYUVCTL_EN;
        int i;

        for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
                regmap_write(backend->engine.regs,
                             SUN4I_BACKEND_YGCOEF_REG(i),
                             sunxi_bt601_yuv2rgb_coef[i]);

        /*
         * We should do that only for a single plane, but the
         * framebuffer's atomic_check has our back on this.
         */
        regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);

        /* TODO: Add support for the multi-planar YUV formats */
        if (drm_format_info_is_yuv_packed(format) &&
            drm_format_info_is_yuv_sampling_422(format))
                val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
        else
                DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);

        /*
         * Allwinner seems to list the pixel sequence from right to left, while
         * DRM lists it from left to right.
         */
        switch (fmt) {
        case DRM_FORMAT_YUYV:
                val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
                break;
        case DRM_FORMAT_YVYU:
                val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
                break;
        case DRM_FORMAT_UYVY:
                val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
                break;
        case DRM_FORMAT_VYUY:
                val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
                break;
        default:
                DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
                                 fmt);
        }

        regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);

        return 0;
}

int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
                                       int layer, struct drm_plane *plane)
{
        struct drm_plane_state *state = plane->state;
        struct drm_framebuffer *fb = state->fb;
        bool interlaced = false;
        u32 val;
        int ret;

        /* Clear the YUV mode */
        regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);

        if (plane->state->crtc)
                interlaced = plane->state->crtc->state->adjusted_mode.flags
                        & DRM_MODE_FLAG_INTERLACE;

        regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
                           SUN4I_BACKEND_MODCTL_ITLMOD_EN,
                           interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);

        DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
                         interlaced ? "on" : "off");

        val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
        if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
                val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
        regmap_update_bits(backend->engine.regs,
                           SUN4I_BACKEND_ATTCTL_REG0(layer),
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
                           val);

        if (fb->format->is_yuv)
                return sun4i_backend_update_yuv_format(backend, layer, plane);

        ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
        if (ret) {
                DRM_DEBUG_DRIVER("Invalid format\n");
                return ret;
        }

        regmap_update_bits(backend->engine.regs,
                           SUN4I_BACKEND_ATTCTL_REG1(layer),
                           SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

        return 0;
}

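/*
 * Configure a layer to take its input from the frontend (video input
 * path) rather than scanning out a framebuffer directly, and set the
 * format the frontend feeds into the layer.
 */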
int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
                                        int layer, uint32_t fmt)
{
        u32 val;
        int ret;

        ret = sun4i_backend_drm_format_to_layer(fmt, &val);
        if (ret) {
                DRM_DEBUG_DRIVER("Invalid format\n");
                return ret;
        }

        regmap_update_bits(backend->engine.regs,
                           SUN4I_BACKEND_ATTCTL_REG0(layer),
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);

        regmap_update_bits(backend->engine.regs,
                           SUN4I_BACKEND_ATTCTL_REG1(layer),
                           SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

        return 0;
}

static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
                                           struct drm_framebuffer *fb,
                                           dma_addr_t paddr)
{
        /* TODO: Add support for the multi-planar YUV formats */
        DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
        regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);

        DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
        regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
                     fb->pitches[0] * 8);

        return 0;
}

int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
                                      int layer, struct drm_plane *plane)
{
        struct drm_plane_state *state = plane->state;
        struct drm_framebuffer *fb = state->fb;
        u32 lo_paddr, hi_paddr;
        dma_addr_t paddr;

        /* Set the line width */
        DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
        regmap_write(backend->engine.regs,
                     SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
                     fb->pitches[0] * 8);

        /* Get the start of the displayed memory */
        paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
        DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);

        if (fb->format->is_yuv)
                return sun4i_backend_update_yuv_buffer(backend, fb, paddr);

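        /*
         * The layer address registers take the framebuffer address in
         * bits rather than bytes (hence the shift by 3 below), split
         * between a low 32 bits register and a high bits register.
         */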
        /* Write the 32 lower bits of the address (in bits) */
        lo_paddr = paddr << 3;
        DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
        regmap_write(backend->engine.regs,
                     SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
                     lo_paddr);

        /* And the upper bits */
        hi_paddr = paddr >> 29;
        DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
        regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
                           SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
                           SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));

        return 0;
}

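/*
 * A layer's position in the blending order is given by the pipe it is
 * assigned to and by its priority within that pipe, both derived from
 * the plane's normalized zpos computed during atomic_check.
 */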
int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
                                    struct drm_plane *plane)
{
        struct drm_plane_state *state = plane->state;
        struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
        unsigned int priority = state->normalized_zpos;
        unsigned int pipe = p_state->pipe;

        DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
                         layer, priority, pipe);
        regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));

        return 0;
}

void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
                                 int layer)
{
        regmap_update_bits(backend->engine.regs,
                           SUN4I_BACKEND_ATTCTL_REG0(layer),
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
}

static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
        u16 src_h = state->src_h >> 16;
        u16 src_w = state->src_w >> 16;

        DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
                         src_w, src_h, state->crtc_w, state->crtc_h);

        if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
                return true;

        return false;
}

static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
        struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
        struct sun4i_backend *backend = layer->backend;
        uint32_t format = state->fb->format->format;
        uint64_t modifier = state->fb->modifier;

        if (IS_ERR(backend->frontend))
                return false;

        if (!sun4i_frontend_format_is_supported(format, modifier))
                return false;

        if (!sun4i_backend_format_is_supported(format, modifier))
                return true;

        /*
         * TODO: The backend alone allows 2x and 4x integer scaling, including
         * support for an alpha component (which the frontend doesn't support).
         * Use the backend directly instead of the frontend in this case, with
         * another test to return false.
         */

        if (sun4i_backend_plane_uses_scaler(state))
                return true;

        /*
         * Here the format is supported by both the frontend and the backend
         * and no frontend scaling is required, so use the backend directly.
         */
        return false;
}

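/*
 * A plane is either handled directly by the backend or routed through
 * the frontend. Anything that needs scaling must go through the
 * frontend, since the backend's own integer scaler is not used here
 * (see the TODO above).
 */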
static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
                                             bool *uses_frontend)
{
        if (sun4i_backend_plane_uses_frontend(state)) {
                *uses_frontend = true;
                return true;
        }

        *uses_frontend = false;

        /* Scaling is not supported without the frontend. */
        if (sun4i_backend_plane_uses_scaler(state))
                return false;

        return true;
}

static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
                                       struct drm_crtc_state *old_state)
{
        u32 val;

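        /*
         * Wait for the hardware to consume the previous commit: the
         * LOADCTL bit stays set until the queued register values have
         * been latched, and we must not start writing a new
         * configuration before that happens.
         */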
        WARN_ON(regmap_read_poll_timeout(engine->regs,
                                         SUN4I_BACKEND_REGBUFFCTL_REG,
                                         val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
                                         100, 50000));
}

static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
                                      struct drm_crtc_state *crtc_state)
{
        struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
        struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
        struct drm_atomic_state *state = crtc_state->state;
        struct drm_device *drm = state->dev;
        struct drm_plane *plane;
        unsigned int num_planes = 0;
        unsigned int num_alpha_planes = 0;
        unsigned int num_frontend_planes = 0;
        unsigned int num_alpha_planes_max = 1;
        unsigned int num_yuv_planes = 0;
        unsigned int current_pipe = 0;
        unsigned int i;

        DRM_DEBUG_DRIVER("Starting to check our planes\n");

        if (!crtc_state->planes_changed)
                return 0;

        drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
                struct drm_plane_state *plane_state =
                        drm_atomic_get_plane_state(state, plane);
                struct sun4i_layer_state *layer_state =
                        state_to_sun4i_layer_state(plane_state);
                struct drm_framebuffer *fb = plane_state->fb;
                struct drm_format_name_buf format_name;

                if (!sun4i_backend_plane_is_supported(plane_state,
                                                      &layer_state->uses_frontend))
                        return -EINVAL;

                if (layer_state->uses_frontend) {
                        DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
                                         plane->index);
                        num_frontend_planes++;
                } else {
                        if (fb->format->is_yuv) {
                                DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
                                num_yuv_planes++;
                        }
                }

                DRM_DEBUG_DRIVER("Plane FB format is %s\n",
                                 drm_get_format_name(fb->format->format,
                                                     &format_name));
                if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
                        num_alpha_planes++;

                DRM_DEBUG_DRIVER("Plane zpos is %d\n",
                                 plane_state->normalized_zpos);

                /* Sort our planes by zpos */
                plane_states[plane_state->normalized_zpos] = plane_state;

                num_planes++;
        }

        /* All our planes were disabled, bail out */
        if (!num_planes)
                return 0;

        /*
         * The hardware is a bit unusual here.
         *
         * Even though it supports 4 layers, it does the composition
         * in two separate steps.
         *
         * The first one is assigning a layer to one of its two
         * pipes. If more than one layer is assigned to the same pipe
         * and pixels overlap, the pipe will take the pixel from the
         * layer with the highest priority.
         *
         * The second step is the actual alpha blending, which takes
         * the two pipes as input and uses the potential alpha
         * component to do the transparency between the two.
         *
         * This two-step scenario makes us unable to guarantee a
         * robust alpha blending between the 4 layers in all
         * situations, since this means that we need to have one layer
         * with alpha at the lowest position of our two pipes.
         *
         * However, we cannot even do that on every platform, since
         * the hardware has a bug where the lowest plane of the lowest
         * pipe (pipe 0, priority 0), if it has any alpha, will
         * discard the pixel data entirely and just display the pixels
         * in the background color (black by default).
         *
         * This means that on the affected platforms, we effectively
         * have only three valid configurations with alpha, all of
         * them with the alpha being on pipe 1 with the lowest
         * position, which can be 1, 2 or 3 depending on the number of
         * planes and their zpos.
         */

        /* For platforms that are not affected by the issue described above. */
        if (backend->quirks->supports_lowest_plane_alpha)
                num_alpha_planes_max++;

        if (num_alpha_planes > num_alpha_planes_max) {
                DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
                return -EINVAL;
        }

        /* We can't have an alpha plane at the lowest position */
        if (!backend->quirks->supports_lowest_plane_alpha &&
            (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
                return -EINVAL;

        for (i = 1; i < num_planes; i++) {
                struct drm_plane_state *p_state = plane_states[i];
                struct drm_framebuffer *fb = p_state->fb;
                struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);

                /*
                 * The only alpha position is the lowest plane of the
                 * second pipe.
                 */
                if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
                        current_pipe++;

                s_state->pipe = current_pipe;
        }

        /* We can only have a single YUV plane at a time */
        if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
                DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
                return -EINVAL;
        }

        if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
                DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
                return -EINVAL;
        }

        DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
                         num_planes, num_alpha_planes, num_frontend_planes,
                         num_yuv_planes);

        return 0;
}

static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
{
        struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
        struct sun4i_frontend *frontend = backend->frontend;

        if (!frontend)
                return;

        /*
         * In a teardown scenario with the frontend involved, we have
         * to keep the frontend enabled until the next vblank, and
         * only then disable it.
         *
         * This is due to the fact that the backend will not take into
         * account the new configuration (with the plane that used to
         * be fed by the frontend now disabled) until we write to the
         * commit bit and the hardware fetches the new configuration
         * during the next vblank.
         *
         * So we keep the frontend around in order to prevent any
         * visual artifacts.
         */
        spin_lock(&backend->frontend_lock);
        if (backend->frontend_teardown) {
                sun4i_frontend_exit(frontend);
                backend->frontend_teardown = false;
        }
        spin_unlock(&backend->frontend_lock);
}

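/*
 * The A33 variant of the backend has an extra SAT block with its own
 * clock and reset line, which needs to be brought up and torn down
 * together with the backend itself.
 */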
static int sun4i_backend_init_sat(struct device *dev)
{
        struct sun4i_backend *backend = dev_get_drvdata(dev);
        int ret;

        backend->sat_reset = devm_reset_control_get(dev, "sat");
        if (IS_ERR(backend->sat_reset)) {
                dev_err(dev, "Couldn't get the SAT reset line\n");
                return PTR_ERR(backend->sat_reset);
        }

        ret = reset_control_deassert(backend->sat_reset);
        if (ret) {
                dev_err(dev, "Couldn't deassert the SAT reset line\n");
                return ret;
        }

        backend->sat_clk = devm_clk_get(dev, "sat");
        if (IS_ERR(backend->sat_clk)) {
                dev_err(dev, "Couldn't get our SAT clock\n");
                ret = PTR_ERR(backend->sat_clk);
                goto err_assert_reset;
        }

        ret = clk_prepare_enable(backend->sat_clk);
        if (ret) {
                dev_err(dev, "Couldn't enable the SAT clock\n");
                goto err_assert_reset;
        }

        return 0;

err_assert_reset:
        reset_control_assert(backend->sat_reset);
        return ret;
}

static int sun4i_backend_free_sat(struct device *dev)
{
        struct sun4i_backend *backend = dev_get_drvdata(dev);

        clk_disable_unprepare(backend->sat_clk);
        reset_control_assert(backend->sat_reset);

        return 0;
}

/*
 * The display backend can take video output from the display frontend, or
 * the display enhancement unit on the A80, as input for one of its layers.
 * This relationship within the display pipeline is encoded in the device
 * tree with of_graph, and we use it here to figure out which backend, if
 * there are 2 or more, we are currently probing. The number would be in
 * the "reg" property of the upstream output port endpoint.
 */
static int sun4i_backend_of_get_id(struct device_node *node)
{
        struct device_node *ep, *remote;
        struct of_endpoint of_ep;

        /* Input port is 0, and we want the first endpoint. */
        ep = of_graph_get_endpoint_by_regs(node, 0, -1);
        if (!ep)
                return -EINVAL;

        remote = of_graph_get_remote_endpoint(ep);
        of_node_put(ep);
        if (!remote)
                return -EINVAL;

        of_graph_parse_endpoint(remote, &of_ep);
        of_node_put(remote);
        return of_ep.id;
}

/* TODO: This needs to take multiple pipelines into account */
static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
                                                           struct device_node *node)
{
        struct device_node *port, *ep, *remote;
        struct sun4i_frontend *frontend;

        port = of_graph_get_port_by_id(node, 0);
        if (!port)
                return ERR_PTR(-EINVAL);

        for_each_available_child_of_node(port, ep) {
                remote = of_graph_get_remote_port_parent(ep);
                if (!remote)
                        continue;
                of_node_put(remote);

                /* Does this node match any registered engines? */
                list_for_each_entry(frontend, &drv->frontend_list, list) {
                        if (remote == frontend->node) {
                                of_node_put(port);
                                of_node_put(ep);
                                return frontend;
                        }
                }
        }
        of_node_put(port);
        return ERR_PTR(-EINVAL);
}

static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
        .atomic_begin = sun4i_backend_atomic_begin,
        .atomic_check = sun4i_backend_atomic_check,
        .commit = sun4i_backend_commit,
        .layers_init = sun4i_layers_init,
        .apply_color_correction = sun4i_backend_apply_color_correction,
        .disable_color_correction = sun4i_backend_disable_color_correction,
        .vblank_quirk = sun4i_backend_vblank_quirk,
};

static struct regmap_config sun4i_backend_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
        .max_register = 0x5800,
};

static int sun4i_backend_bind(struct device *dev, struct device *master,
                              void *data)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct drm_device *drm = data;
        struct sun4i_drv *drv = drm->dev_private;
        struct sun4i_backend *backend;
        const struct sun4i_backend_quirks *quirks;
        struct resource *res;
        void __iomem *regs;
        int i, ret;

        backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
        if (!backend)
                return -ENOMEM;
        dev_set_drvdata(dev, backend);
        spin_lock_init(&backend->frontend_lock);

        if (of_find_property(dev->of_node, "interconnects", NULL)) {
                /*
                 * This assumes we have the same DMA constraints for all the
                 * devices in our pipeline (all the backends, but also the
                 * frontends). This sounds bad, but it has always been the case
                 * for us, and DRM doesn't do per-device allocation either, so
                 * we would need to fix DRM first...
                 */
                ret = of_dma_configure(drm->dev, dev->of_node, true);
                if (ret)
                        return ret;
        } else {
                /*
                 * If we don't have the interconnect property, most likely
                 * because of an old DT, we need to set the DMA offset by hand
                 * on our device since the RAM mapping is at 0 for the DMA bus,
                 * unlike the CPU.
                 */
                drm->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
        }

        backend->engine.node = dev->of_node;
        backend->engine.ops = &sun4i_backend_engine_ops;
        backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
        if (backend->engine.id < 0)
                return backend->engine.id;

        backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
        if (IS_ERR(backend->frontend))
                dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        backend->reset = devm_reset_control_get(dev, NULL);
        if (IS_ERR(backend->reset)) {
                dev_err(dev, "Couldn't get our reset line\n");
                return PTR_ERR(backend->reset);
        }

        ret = reset_control_deassert(backend->reset);
        if (ret) {
                dev_err(dev, "Couldn't deassert our reset line\n");
                return ret;
        }

        backend->bus_clk = devm_clk_get(dev, "ahb");
        if (IS_ERR(backend->bus_clk)) {
                dev_err(dev, "Couldn't get the backend bus clock\n");
                ret = PTR_ERR(backend->bus_clk);
                goto err_assert_reset;
        }
        clk_prepare_enable(backend->bus_clk);

        backend->mod_clk = devm_clk_get(dev, "mod");
        if (IS_ERR(backend->mod_clk)) {
                dev_err(dev, "Couldn't get the backend module clock\n");
                ret = PTR_ERR(backend->mod_clk);
                goto err_disable_bus_clk;
        }

        ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
        if (ret) {
                dev_err(dev, "Couldn't set the module clock frequency\n");
                goto err_disable_bus_clk;
        }

        clk_prepare_enable(backend->mod_clk);

        backend->ram_clk = devm_clk_get(dev, "ram");
        if (IS_ERR(backend->ram_clk)) {
                dev_err(dev, "Couldn't get the backend RAM clock\n");
                ret = PTR_ERR(backend->ram_clk);
                goto err_disable_mod_clk;
        }
        clk_prepare_enable(backend->ram_clk);

        if (of_device_is_compatible(dev->of_node,
                                    "allwinner,sun8i-a33-display-backend")) {
                ret = sun4i_backend_init_sat(dev);
                if (ret) {
                        dev_err(dev, "Couldn't init SAT resources\n");
                        goto err_disable_ram_clk;
                }
        }

        backend->engine.regs = devm_regmap_init_mmio(dev, regs,
                                                     &sun4i_backend_regmap_config);
        if (IS_ERR(backend->engine.regs)) {
                dev_err(dev, "Couldn't create the backend regmap\n");
                return PTR_ERR(backend->engine.regs);
        }

        list_add_tail(&backend->engine.list, &drv->engine_list);

        /*
         * Many of the backend's layer configuration registers have
         * undefined default values. This poses a risk as we use
         * regmap_update_bits in some places, and don't overwrite
         * the whole register.
         *
         * Clear the registers here to have something predictable.
         */
        for (i = 0x800; i < 0x1000; i += 4)
                regmap_write(backend->engine.regs, i, 0);

        /* Disable registers autoloading */
        regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG,
                     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);

        /* Enable the backend */
        regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
                     SUN4I_BACKEND_MODCTL_DEBE_EN |
                     SUN4I_BACKEND_MODCTL_START_CTL);

        /* Set output selection if needed */
        quirks = of_device_get_match_data(dev);
        if (quirks->needs_output_muxing) {
                /*
                 * We assume there is no dynamic muxing of backends
                 * and TCONs, so we select the backend with the same ID.
                 *
                 * While dynamic selection might be interesting, since
                 * the CRTC is tied to the TCON while the layers are
                 * tied to the backends, this means we would need to
                 * switch between groups of layers. There might not be
                 * a way to represent this constraint in DRM.
                 */
                regmap_update_bits(backend->engine.regs,
                                   SUN4I_BACKEND_MODCTL_REG,
                                   SUN4I_BACKEND_MODCTL_OUT_SEL,
                                   (backend->engine.id
                                    ? SUN4I_BACKEND_MODCTL_OUT_LCD1
                                    : SUN4I_BACKEND_MODCTL_OUT_LCD0));
        }

        backend->quirks = quirks;

        return 0;

err_disable_ram_clk:
        clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
        clk_rate_exclusive_put(backend->mod_clk);
        clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
        clk_disable_unprepare(backend->bus_clk);
err_assert_reset:
        reset_control_assert(backend->reset);
        return ret;
}

static void sun4i_backend_unbind(struct device *dev, struct device *master,
                                 void *data)
{
        struct sun4i_backend *backend = dev_get_drvdata(dev);

        list_del(&backend->engine.list);

        if (of_device_is_compatible(dev->of_node,
                                    "allwinner,sun8i-a33-display-backend"))
                sun4i_backend_free_sat(dev);

        clk_disable_unprepare(backend->ram_clk);
        clk_rate_exclusive_put(backend->mod_clk);
        clk_disable_unprepare(backend->mod_clk);
        clk_disable_unprepare(backend->bus_clk);
        reset_control_assert(backend->reset);
}

static const struct component_ops sun4i_backend_ops = {
        .bind = sun4i_backend_bind,
        .unbind = sun4i_backend_unbind,
};

static int sun4i_backend_probe(struct platform_device *pdev)
{
        return component_add(&pdev->dev, &sun4i_backend_ops);
}

static int sun4i_backend_remove(struct platform_device *pdev)
{
        component_del(&pdev->dev, &sun4i_backend_ops);

        return 0;
}

static const struct sun4i_backend_quirks sun4i_backend_quirks = {
        .needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun5i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun6i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun7i_backend_quirks = {
        .needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
        .supports_lowest_plane_alpha = true,
};

static const struct sun4i_backend_quirks sun9i_backend_quirks = {
};

static const struct of_device_id sun4i_backend_of_table[] = {
        {
                .compatible = "allwinner,sun4i-a10-display-backend",
                .data = &sun4i_backend_quirks,
        },
        {
                .compatible = "allwinner,sun5i-a13-display-backend",
                .data = &sun5i_backend_quirks,
        },
        {
                .compatible = "allwinner,sun6i-a31-display-backend",
                .data = &sun6i_backend_quirks,
        },
        {
                .compatible = "allwinner,sun7i-a20-display-backend",
                .data = &sun7i_backend_quirks,
        },
        {
                .compatible = "allwinner,sun8i-a23-display-backend",
                .data = &sun8i_a33_backend_quirks,
        },
        {
                .compatible = "allwinner,sun8i-a33-display-backend",
                .data = &sun8i_a33_backend_quirks,
        },
        {
                .compatible = "allwinner,sun9i-a80-display-backend",
                .data = &sun9i_backend_quirks,
        },
        { }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);

static struct platform_driver sun4i_backend_platform_driver = {
        .probe = sun4i_backend_probe,
        .remove = sun4i_backend_remove,
        .driver = {
                .name = "sun4i-backend",
                .of_match_table = sun4i_backend_of_table,
        },
};
module_platform_driver(sun4i_backend_platform_driver);

MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
MODULE_LICENSE("GPL");