1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * rcar_du_crtc.c -- R-Car Display Unit CRTCs
4 *
5 * Copyright (C) 2013-2015 Renesas Electronics Corporation
6 *
7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
8 */
9
10#include <linux/clk.h>
11#include <linux/mutex.h>
12#include <linux/platform_device.h>
13#include <linux/sys_soc.h>
14
15#include <drm/drm_atomic.h>
16#include <drm/drm_atomic_helper.h>
17#include <drm/drm_crtc.h>
18#include <drm/drm_device.h>
19#include <drm/drm_fb_cma_helper.h>
20#include <drm/drm_gem_cma_helper.h>
21#include <drm/drm_plane_helper.h>
22#include <drm/drm_vblank.h>
23
24#include "rcar_du_crtc.h"
25#include "rcar_du_drv.h"
26#include "rcar_du_encoder.h"
27#include "rcar_du_kms.h"
28#include "rcar_du_plane.h"
29#include "rcar_du_regs.h"
30#include "rcar_du_vsp.h"
31#include "rcar_lvds.h"
32
33static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
34{
35 struct rcar_du_device *rcdu = rcrtc->dev;
36
37 return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
38}
39
40static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
41{
42 struct rcar_du_device *rcdu = rcrtc->dev;
43
44 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
45}
46
47static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
48{
49 struct rcar_du_device *rcdu = rcrtc->dev;
50
51 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
52 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
53}
54
55static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
56{
57 struct rcar_du_device *rcdu = rcrtc->dev;
58
59 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
60 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
61}
62
63void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set)
64{
65 struct rcar_du_device *rcdu = rcrtc->dev;
66
67 rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set;
68 rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr);
69}
70
71/* -----------------------------------------------------------------------------
72 * Hardware Setup
73 */
74
75struct dpll_info {
76 unsigned int output;
77 unsigned int fdpll;
78 unsigned int n;
79 unsigned int m;
80};
81
82static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
83 struct dpll_info *dpll,
84 unsigned long input,
85 unsigned long target)
86{
87 unsigned long best_diff = (unsigned long)-1;
88 unsigned long diff;
89 unsigned int fdpll;
90 unsigned int m;
91 unsigned int n;
92
93 /*
94 *    fin                                 fvco    fout           fclkout
95 * in --> [1/M] --> |PD| -> [LPF] -> [VCO] -> [1/P] -+-> [1/FDPLL] -> out
96 *              +-> |  |                             |
97 *              |                                    |
98 *              +---------------- [1/N] <------------+
99 *
100 * fclkout = fvco / P / FDPLL -- (1)
101 *
102 * fin/M = fvco/P/N
103 *
104 * fvco = fin * P * N / M -- (2)
105 *
106 * (1) + (2) indicates
107 *
108 * fclkout = fin * N / M / FDPLL
109 *
110 * NOTES
111 * N : (n + 1)
112 * M : (m + 1)
113 * FDPLL : (fdpll + 1)
114 * P : 2
115 * 2kHz < fvco < 4096MHz
116 *
117 * To minimize the jitter,
118 * N : as large as possible
119 * M : as small as possible
120 */
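/*
 * Purely illustrative numbers (not taken from any specific board): with an
 * external input of fin = 33 MHz and a 148.5 MHz target dot clock, N = 54,
 * M = 1 and FDPLL = 12 give fclkout = 33 MHz * 54 / 1 / 12 = 148.5 MHz,
 * with fvco = 33 MHz * 2 * 54 / 1 = 3564 MHz inside the allowed VCO range.
 */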
121 for (m = 0; m < 4; m++) {
122 for (n = 119; n > 38; n--) {
123 /*
124 * This code only runs on 64-bit architectures, so the
125 * unsigned long type can be used for 64-bit
126 * computation. It will still compile without any
127 * warning on 32-bit architectures.
128 *
129 * To optimize calculations, use fout instead of fvco
130 * to verify the VCO frequency constraint.
131 */
132 unsigned long fout = input * (n + 1) / (m + 1);
133
134 if (fout < 1000 || fout > 2048 * 1000 * 1000U)
135 continue;
136
137 for (fdpll = 1; fdpll < 32; fdpll++) {
138 unsigned long output;
139
140 output = fout / (fdpll + 1);
141 if (output >= 400 * 1000 * 1000)
142 continue;
143
144 diff = abs((long)output - (long)target);
145 if (best_diff > diff) {
146 best_diff = diff;
147 dpll->n = n;
148 dpll->m = m;
149 dpll->fdpll = fdpll;
150 dpll->output = output;
151 }
152
153 if (diff == 0)
154 goto done;
155 }
156 }
157 }
158
159done:
160 dev_dbg(rcrtc->dev->dev,
161 "output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
162 dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff);
163}
164
165struct du_clk_params {
166 struct clk *clk;
167 unsigned long rate;
168 unsigned long diff;
169 u32 escr;
170};
171
172static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
173 u32 escr, struct du_clk_params *params)
174{
175 unsigned long rate;
176 unsigned long diff;
177 u32 div;
178
179 /*
180 * If the target rate has already been achieved perfectly we can't do
181 * better.
182 */
183 if (params->diff == 0)
184 return;
185
186 /*
187 * Compute the input clock rate and internal divisor values to obtain
188 * the clock rate closest to the target frequency.
189 */
190 rate = clk_round_rate(clk, target);
191 div = clamp(DIV_ROUND_CLOSEST(rate, target), 1UL, 64UL) - 1;
192 diff = abs(rate / (div + 1) - target);
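	/*
	 * Illustrative numbers only: if clk_round_rate() returns 99 MHz for a
	 * 33 MHz target, div = DIV_ROUND_CLOSEST(99, 33) - 1 = 2 and the
	 * divided clock 99 MHz / (2 + 1) = 33 MHz matches the target exactly
	 * (diff = 0).
	 */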
193
194 /*
195 * Store the parameters if the resulting frequency is better than any
196 * previously calculated value.
197 */
198 if (diff < params->diff) {
199 params->clk = clk;
200 params->rate = rate;
201 params->diff = diff;
202 params->escr = escr | div;
203 }
204}
205
206static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
207 { .soc_id = "r8a7795", .revision = "ES1.*" },
208 { /* sentinel */ }
209};
210
211static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
212{
213 const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
214 struct rcar_du_device *rcdu = rcrtc->dev;
215 unsigned long mode_clock = mode->clock * 1000;
216 u32 dsmr;
217 u32 escr;
218
219 if (rcdu->info->dpll_mask & (1 << rcrtc->index)) {
220 unsigned long target = mode_clock;
221 struct dpll_info dpll = { 0 };
222 unsigned long extclk;
223 u32 dpllcr;
224 u32 div = 0;
225
226 /*
227 * DU channels that have a display PLL can't use the internal
228 * system clock, and have no internal clock divider.
229 */
230
231 /*
232 * The H3 ES1.x exhibits dot clock duty cycle stability issues.
233 * We can work around them by configuring the DPLL to twice the
234 * desired frequency, coupled with a /2 post-divider. Restrict
235 * the workaround to H3 ES1.x as ES2.0 and all other SoCs have
236 * no post-divider when a display PLL is present (as shown by
237 * the workaround breaking HDMI output on M3-W during testing).
238 */
239 if (soc_device_match(rcar_du_r8a7795_es1)) {
240 target *= 2;
241 div = 1;
242 }
243
244 extclk = clk_get_rate(rcrtc->extclock);
245 rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
246
247 dpllcr = DPLLCR_CODE | DPLLCR_CLKE
248 | DPLLCR_FDPLL(dpll.fdpll)
249 | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
250 | DPLLCR_STBY;
251
252 if (rcrtc->index == 1)
253 dpllcr |= DPLLCR_PLCS1
254 | DPLLCR_INCS_DOTCLKIN1;
255 else
256 dpllcr |= DPLLCR_PLCS0
257 | DPLLCR_INCS_DOTCLKIN0;
258
259 rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
260
261 escr = ESCR_DCLKSEL_DCLKIN | div;
262 } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
263 /*
264 * Use the LVDS PLL output as the dot clock when outputting to
265 * the LVDS encoder on an SoC that supports this clock routing
266 * option. We use the clock directly in that case, without any
267 * additional divider.
268 */
269 escr = ESCR_DCLKSEL_DCLKIN;
270 } else {
271 struct du_clk_params params = { .diff = (unsigned long)-1 };
272
273 rcar_du_escr_divider(rcrtc->clock, mode_clock,
274 ESCR_DCLKSEL_CLKS, &params);
275 if (rcrtc->extclock)
276 rcar_du_escr_divider(rcrtc->extclock, mode_clock,
277 ESCR_DCLKSEL_DCLKIN, &params);
278
279 dev_dbg(rcrtc->dev->dev, "mode clock %lu %s rate %lu\n",
280 mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
281 params.rate);
282
283 clk_set_rate(params.clk, params.rate);
284 escr = params.escr;
285 }
286
287 dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
288
289 rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
290 rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
291
292 /* Signal polarities */
293 dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
294 | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
295 | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0)
296 | DSMR_DIPM_DISP | DSMR_CSPM;
297 rcar_du_crtc_write(rcrtc, DSMR, dsmr);
298
299 /* Display timings */
300 rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
301 rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
302 mode->hdisplay - 19);
303 rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
304 mode->hsync_start - 1);
305 rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1);
306
307 rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
308 mode->crtc_vsync_end - 2);
309 rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
310 mode->crtc_vsync_end +
311 mode->crtc_vdisplay - 2);
312 rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
313 mode->crtc_vsync_end +
314 mode->crtc_vsync_start - 1);
315 rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
316
317 rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
318 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
319}
320
321static unsigned int plane_zpos(struct rcar_du_plane *plane)
322{
323 return plane->plane.state->normalized_zpos;
324}
325
326static const struct rcar_du_format_info *
327plane_format(struct rcar_du_plane *plane)
328{
329 return to_rcar_plane_state(plane->plane.state)->format;
330}
331
332static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
333{
334 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
335 struct rcar_du_device *rcdu = rcrtc->dev;
336 unsigned int num_planes = 0;
337 unsigned int dptsr_planes;
338 unsigned int hwplanes = 0;
339 unsigned int prio = 0;
340 unsigned int i;
341 u32 dspr = 0;
342
343 for (i = 0; i < rcrtc->group->num_planes; ++i) {
344 struct rcar_du_plane *plane = &rcrtc->group->planes[i];
345 unsigned int j;
346
347 if (plane->plane.state->crtc != &rcrtc->crtc ||
348 !plane->plane.state->visible)
349 continue;
350
351 /* Insert the plane in the sorted planes array. */
352 for (j = num_planes++; j > 0; --j) {
353 if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
354 break;
355 planes[j] = planes[j-1];
356 }
357
358 planes[j] = plane;
359 prio += plane_format(plane)->planes * 4;
360 }
361
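	/*
	 * Pack the display priority register value: each plane occupies a
	 * 4-bit field holding its one-based hardware plane index. The sorted
	 * list is walked from bottom to top, so the top-most plane ends up in
	 * the least significant field.
	 */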
362 for (i = 0; i < num_planes; ++i) {
363 struct rcar_du_plane *plane = planes[i];
364 struct drm_plane_state *state = plane->plane.state;
365 unsigned int index = to_rcar_plane_state(state)->hwindex;
366
367 prio -= 4;
368 dspr |= (index + 1) << prio;
369 hwplanes |= 1 << index;
370
371 if (plane_format(plane)->planes == 2) {
372 index = (index + 1) % 8;
373
374 prio -= 4;
375 dspr |= (index + 1) << prio;
376 hwplanes |= 1 << index;
377 }
378 }
379
380 /* If VSP+DU integration is enabled the plane assignment is fixed. */
381 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
382 if (rcdu->info->gen < 3) {
383 dspr = (rcrtc->index % 2) + 1;
384 hwplanes = 1 << (rcrtc->index % 2);
385 } else {
386 dspr = (rcrtc->index % 2) ? 3 : 1;
387 hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0);
388 }
389 }
390
391 /*
392 * Update the planes to display timing and dot clock generator
393 * associations.
394 *
395 * Updating the DPTSR register requires restarting the CRTC group,
396 * resulting in visible flicker. To mitigate the issue only update the
397 * association if needed by enabled planes. Planes being disabled will
398 * keep their current association.
399 */
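	/*
	 * The DPTSR bits select which of the group's two channels scans out
	 * each plane: they are set for planes assigned to the second channel
	 * (odd CRTC index) and cleared for planes assigned to the first
	 * channel.
	 */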
400 mutex_lock(&rcrtc->group->lock);
401
402 dptsr_planes = rcrtc->index % 2 ? rcrtc->group->dptsr_planes | hwplanes
403 : rcrtc->group->dptsr_planes & ~hwplanes;
404
405 if (dptsr_planes != rcrtc->group->dptsr_planes) {
406 rcar_du_group_write(rcrtc->group, DPTSR,
407 (dptsr_planes << 16) | dptsr_planes);
408 rcrtc->group->dptsr_planes = dptsr_planes;
409
410 if (rcrtc->group->used_crtcs)
411 rcar_du_group_restart(rcrtc->group);
412 }
413
414 /* Restart the group if plane sources have changed. */
415 if (rcrtc->group->need_restart)
416 rcar_du_group_restart(rcrtc->group);
417
418 mutex_unlock(&rcrtc->group->lock);
419
420 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
421 dspr);
422}
423
424/* -----------------------------------------------------------------------------
425 * Page Flip
426 */
427
428void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
429{
430 struct drm_pending_vblank_event *event;
431 struct drm_device *dev = rcrtc->crtc.dev;
432 unsigned long flags;
433
434 spin_lock_irqsave(&dev->event_lock, flags);
435 event = rcrtc->event;
436 rcrtc->event = NULL;
437 spin_unlock_irqrestore(&dev->event_lock, flags);
438
439 if (event == NULL)
440 return;
441
442 spin_lock_irqsave(&dev->event_lock, flags);
443 drm_crtc_send_vblank_event(&rcrtc->crtc, event);
444 wake_up(&rcrtc->flip_wait);
445 spin_unlock_irqrestore(&dev->event_lock, flags);
446
447 drm_crtc_vblank_put(&rcrtc->crtc);
448}
449
450static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
451{
452 struct drm_device *dev = rcrtc->crtc.dev;
453 unsigned long flags;
454 bool pending;
455
456 spin_lock_irqsave(&dev->event_lock, flags);
457 pending = rcrtc->event != NULL;
458 spin_unlock_irqrestore(&dev->event_lock, flags);
459
460 return pending;
461}
462
463static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
464{
465 struct rcar_du_device *rcdu = rcrtc->dev;
466
467 if (wait_event_timeout(rcrtc->flip_wait,
468 !rcar_du_crtc_page_flip_pending(rcrtc),
469 msecs_to_jiffies(50)))
470 return;
471
472 dev_warn(rcdu->dev, "page flip timeout\n");
473
474 rcar_du_crtc_finish_page_flip(rcrtc);
475}
476
477/* -----------------------------------------------------------------------------
478 * Start/Stop and Suspend/Resume
479 */
480
481static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
482{
483 /* Set display off and background to black */
484 rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
485 rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
486
487 /* Configure display timings and output routing */
488 rcar_du_crtc_set_display_timing(rcrtc);
489 rcar_du_group_set_routing(rcrtc->group);
490
491 /* Start with all planes disabled. */
492 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
493
494 /* Enable the VSP compositor. */
495 if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
496 rcar_du_vsp_enable(rcrtc);
497
498 /* Turn vertical blanking interrupt reporting on. */
499 drm_crtc_vblank_on(&rcrtc->crtc);
500}
501
502static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
503{
504 int ret;
505
506 /*
507 * Guard against double-get, as the function is called from both the
508 * .atomic_enable() and .atomic_begin() handlers.
509 */
510 if (rcrtc->initialized)
511 return 0;
512
513 ret = clk_prepare_enable(rcrtc->clock);
514 if (ret < 0)
515 return ret;
516
517 ret = clk_prepare_enable(rcrtc->extclock);
518 if (ret < 0)
519 goto error_clock;
520
521 ret = rcar_du_group_get(rcrtc->group);
522 if (ret < 0)
523 goto error_group;
524
525 rcar_du_crtc_setup(rcrtc);
526 rcrtc->initialized = true;
527
528 return 0;
529
530error_group:
531 clk_disable_unprepare(rcrtc->extclock);
532error_clock:
533 clk_disable_unprepare(rcrtc->clock);
534 return ret;
535}
536
537static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
538{
539 rcar_du_group_put(rcrtc->group);
540
541 clk_disable_unprepare(rcrtc->extclock);
542 clk_disable_unprepare(rcrtc->clock);
543
544 rcrtc->initialized = false;
545}
546
547static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
548{
549 bool interlaced;
550
551 /*
552 * Select master sync mode. This enables display operation in master
553 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
554 * actively driven).
555 */
556 interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
557 rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
558 (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
559 DSYSR_TVM_MASTER);
560
561 rcar_du_group_start_stop(rcrtc->group, true);
562}
563
564static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
565{
566 struct rcar_du_device *rcdu = rcrtc->dev;
567 struct drm_crtc *crtc = &rcrtc->crtc;
568 u32 status;
569
570 /* Make sure vblank interrupts are enabled. */
571 drm_crtc_vblank_get(crtc);
572
573 /*
574 * Disable planes and calculate how many vertical blanking interrupts we
575 * have to wait for. If a vertical blanking interrupt has been triggered
576 * but not processed yet, we don't know whether it occurred before or
577 * after the planes got disabled. We thus have to wait for two vblank
578 * interrupts in that case.
579 */
580 spin_lock_irq(&rcrtc->vblank_lock);
581 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
582 status = rcar_du_crtc_read(rcrtc, DSSR);
583 rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
584 spin_unlock_irq(&rcrtc->vblank_lock);
585
586 if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
587 msecs_to_jiffies(100)))
588 dev_warn(rcdu->dev, "vertical blanking timeout\n");
589
590 drm_crtc_vblank_put(crtc);
591}
592
593static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
594{
595 struct drm_crtc *crtc = &rcrtc->crtc;
596
597 /*
598 * Disable all planes and wait for the change to take effect. This is
599 * required as the plane enable registers are updated on vblank, and no
600 * vblank will occur once the CRTC is stopped. Disabling planes when
601 * starting the CRTC thus wouldn't be enough as it would start scanning
602 * out immediately from old frame buffers until the next vblank.
603 *
604 * This increases the CRTC stop delay, especially when multiple CRTCs
605 * are stopped in one operation as we now wait for one vblank per CRTC.
606 * Whether this can be improved needs to be researched.
607 */
608 rcar_du_crtc_disable_planes(rcrtc);
609
610 /*
611 * Disable vertical blanking interrupt reporting. We first need to wait
612 * for page flip completion before stopping the CRTC as userspace
613 * expects page flips to eventually complete.
614 */
615 rcar_du_crtc_wait_page_flip(rcrtc);
616 drm_crtc_vblank_off(crtc);
617
618 /* Disable the VSP compositor. */
619 if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
620 rcar_du_vsp_disable(rcrtc);
621
622 /*
623 * Select switch sync mode. This stops display operation and configures
624 * the HSYNC and VSYNC signals as inputs.
625 *
626 * TODO: Find another way to stop the display for DUs that don't support
627 * TVM sync.
628 */
629 if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_TVM_SYNC))
630 rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK,
631 DSYSR_TVM_SWITCH);
632
633 rcar_du_group_start_stop(rcrtc->group, false);
634}
635
636/* -----------------------------------------------------------------------------
637 * CRTC Functions
638 */
639
640static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
641 struct drm_crtc_state *state)
642{
643 struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);
644 struct drm_encoder *encoder;
645
646 /* Store the routes from the CRTC output to the DU outputs. */
647 rstate->outputs = 0;
648
649 drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
650 struct rcar_du_encoder *renc;
651
652 /* Skip the writeback encoder. */
653 if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
654 continue;
655
656 renc = to_rcar_encoder(encoder);
657 rstate->outputs |= BIT(renc->output);
658 }
659
660 return 0;
661}
662
663static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
664 struct drm_crtc_state *old_state)
665{
666 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
667 struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
668 struct rcar_du_device *rcdu = rcrtc->dev;
669
670 rcar_du_crtc_get(rcrtc);
671
672 /*
673 * On D3/E3 the dot clock is provided by the LVDS encoder attached to
674 * the DU channel. We need to enable its clock output explicitly if
675 * the LVDS output is disabled.
676 */
677 if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
678 rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
679 struct rcar_du_encoder *encoder =
680 rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
681 const struct drm_display_mode *mode =
682 &crtc->state->adjusted_mode;
683
684 rcar_lvds_clk_enable(encoder->base.bridge,
685 mode->clock * 1000);
686 }
687
688 rcar_du_crtc_start(rcrtc);
689}
690
691static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
692 struct drm_crtc_state *old_state)
693{
694 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
695 struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
696 struct rcar_du_device *rcdu = rcrtc->dev;
697
698 rcar_du_crtc_stop(rcrtc);
699 rcar_du_crtc_put(rcrtc);
700
701 if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
702 rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
703 struct rcar_du_encoder *encoder =
704 rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
705
706 /*
707 * Disable the LVDS clock output, see
708 * rcar_du_crtc_atomic_enable().
709 */
710 rcar_lvds_clk_disable(encoder->base.bridge);
711 }
712
713 spin_lock_irq(&crtc->dev->event_lock);
714 if (crtc->state->event) {
715 drm_crtc_send_vblank_event(crtc, crtc->state->event);
716 crtc->state->event = NULL;
717 }
718 spin_unlock_irq(&crtc->dev->event_lock);
719}
720
721static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
722 struct drm_crtc_state *old_crtc_state)
723{
724 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
725
726 WARN_ON(!crtc->state->enable);
727
728 /*
729 * If a mode set is in progress we can be called with the CRTC disabled.
730 * We thus need to first get and setup the CRTC in order to configure
731 * planes. We must *not* put the CRTC in .atomic_flush(), as it must be
732 * kept awake until the .atomic_enable() call that will follow. The get
733 * operation in .atomic_enable() will in that case be a no-op, and the
734 * CRTC will be put later in .atomic_disable().
735 *
736 * If a mode set is not in progress the CRTC is enabled, and the
737 * following get call will be a no-op. There is thus no need to balance
738 * it in .atomic_flush() either.
739 */
740 rcar_du_crtc_get(rcrtc);
741
742 if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
743 rcar_du_vsp_atomic_begin(rcrtc);
744}
745
746static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
747 struct drm_crtc_state *old_crtc_state)
748{
749 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
750 struct drm_device *dev = rcrtc->crtc.dev;
751 unsigned long flags;
752
753 rcar_du_crtc_update_planes(rcrtc);
754
755 if (crtc->state->event) {
756 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
757
758 spin_lock_irqsave(&dev->event_lock, flags);
759 rcrtc->event = crtc->state->event;
760 crtc->state->event = NULL;
761 spin_unlock_irqrestore(&dev->event_lock, flags);
762 }
763
764 if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
765 rcar_du_vsp_atomic_flush(rcrtc);
766}
767
768static enum drm_mode_status
769rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
770 const struct drm_display_mode *mode)
771{
772 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
773 struct rcar_du_device *rcdu = rcrtc->dev;
774 bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
775 unsigned int vbp;
776
777 if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
778 return MODE_NO_INTERLACE;
779
780 /*
781 * The hardware requires a minimum combined horizontal sync and back
782 * porch of 20 pixels and a minimum vertical back porch of 3 lines.
783 */
784 if (mode->htotal - mode->hsync_start < 20)
785 return MODE_HBLANK_NARROW;
786
787 vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
788 if (vbp < 3)
789 return MODE_VBLANK_NARROW;
790
791 return MODE_OK;
792}
793
794static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
795 .atomic_check = rcar_du_crtc_atomic_check,
796 .atomic_begin = rcar_du_crtc_atomic_begin,
797 .atomic_flush = rcar_du_crtc_atomic_flush,
798 .atomic_enable = rcar_du_crtc_atomic_enable,
799 .atomic_disable = rcar_du_crtc_atomic_disable,
800 .mode_valid = rcar_du_crtc_mode_valid,
801};
802
803static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
804{
805 struct rcar_du_device *rcdu = rcrtc->dev;
806 const char **sources;
807 unsigned int count;
808 int i = -1;
809
810 /* CRC available only on Gen3 HW. */
811 if (rcdu->info->gen < 3)
812 return;
813
814 /* Reserve 1 for "auto" source. */
815 count = rcrtc->vsp->num_planes + 1;
816
817 sources = kmalloc_array(count, sizeof(*sources), GFP_KERNEL);
818 if (!sources)
819 return;
820
821 sources[0] = kstrdup("auto", GFP_KERNEL);
822 if (!sources[0])
823 goto error;
824
825 for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
826 struct drm_plane *plane = &rcrtc->vsp->planes[i].plane;
827 char name[16];
828
829 sprintf(name, "plane%u", plane->base.id);
830 sources[i + 1] = kstrdup(name, GFP_KERNEL);
831 if (!sources[i + 1])
832 goto error;
833 }
834
835 rcrtc->sources = sources;
836 rcrtc->sources_count = count;
837 return;
838
839error:
840 while (i >= 0) {
841 kfree(sources[i]);
842 i--;
843 }
844 kfree(sources);
845}
846
847static void rcar_du_crtc_crc_cleanup(struct rcar_du_crtc *rcrtc)
848{
849 unsigned int i;
850
851 if (!rcrtc->sources)
852 return;
853
854 for (i = 0; i < rcrtc->sources_count; i++)
855 kfree(rcrtc->sources[i]);
856 kfree(rcrtc->sources);
857
858 rcrtc->sources = NULL;
859 rcrtc->sources_count = 0;
860}
861
862static struct drm_crtc_state *
863rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
864{
865 struct rcar_du_crtc_state *state;
866 struct rcar_du_crtc_state *copy;
867
868 if (WARN_ON(!crtc->state))
869 return NULL;
870
871 state = to_rcar_crtc_state(crtc->state);
872 copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
873 if (copy == NULL)
874 return NULL;
875
876 __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->state);
877
878 return &copy->state;
879}
880
881static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
882 struct drm_crtc_state *state)
883{
884 __drm_atomic_helper_crtc_destroy_state(state);
885 kfree(to_rcar_crtc_state(state));
886}
887
888static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
889{
890 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
891
892 rcar_du_crtc_crc_cleanup(rcrtc);
893
894 return drm_crtc_cleanup(crtc);
895}
896
897static void rcar_du_crtc_reset(struct drm_crtc *crtc)
898{
899 struct rcar_du_crtc_state *state;
900
901 if (crtc->state) {
902 rcar_du_crtc_atomic_destroy_state(crtc, crtc->state);
903 crtc->state = NULL;
904 }
905
906 state = kzalloc(sizeof(*state), GFP_KERNEL);
907 if (state == NULL)
908 return;
909
910 state->crc.source = VSP1_DU_CRC_NONE;
911 state->crc.index = 0;
912
913 crtc->state = &state->state;
914 crtc->state->crtc = crtc;
915}
916
917static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
918{
919 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
920
921 rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
922 rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
923 rcrtc->vblank_enable = true;
924
925 return 0;
926}
927
928static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
929{
930 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
931
932 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
933 rcrtc->vblank_enable = false;
934}
935
936static int rcar_du_crtc_parse_crc_source(struct rcar_du_crtc *rcrtc,
937 const char *source_name,
938 enum vsp1_du_crc_source *source)
939{
940 unsigned int index;
941 int ret;
942
943 /*
944 * Parse the source name. Supported values are "plane%u" to compute the
945 * CRC on an input plane (%u is the plane ID), and "auto" to compute the
946 * CRC on the composer (VSP) output.
947 */
948
949 if (!source_name) {
950 *source = VSP1_DU_CRC_NONE;
951 return 0;
952 } else if (!strcmp(source_name, "auto")) {
953 *source = VSP1_DU_CRC_OUTPUT;
954 return 0;
955 } else if (strstarts(source_name, "plane")) {
956 unsigned int i;
957
958 *source = VSP1_DU_CRC_PLANE;
959
960 ret = kstrtouint(source_name + strlen("plane"), 10, &index);
961 if (ret < 0)
962 return ret;
963
964 for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
965 if (index == rcrtc->vsp->planes[i].plane.base.id)
966 return i;
967 }
968 }
969
970 return -EINVAL;
971}
972
973static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
974 const char *source_name,
975 size_t *values_cnt)
976{
977 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
978 enum vsp1_du_crc_source source;
979
980 if (rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source) < 0) {
981 DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
982 return -EINVAL;
983 }
984
985 *values_cnt = 1;
986 return 0;
987}
988
989static const char *const *
990rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count)
991{
992 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
993
994 *count = rcrtc->sources_count;
995 return rcrtc->sources;
996}
997
998static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
999 const char *source_name)
1000{
1001 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
1002 struct drm_modeset_acquire_ctx ctx;
1003 struct drm_crtc_state *crtc_state;
1004 struct drm_atomic_state *state;
1005 enum vsp1_du_crc_source source;
1006 unsigned int index;
1007 int ret;
1008
1009 ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
1010 if (ret < 0)
1011 return ret;
1012
1013 index = ret;
1014
1015 /* Perform an atomic commit to set the CRC source. */
1016 drm_modeset_acquire_init(&ctx, 0);
1017
1018 state = drm_atomic_state_alloc(crtc->dev);
1019 if (!state) {
1020 ret = -ENOMEM;
1021 goto unlock;
1022 }
1023
1024 state->acquire_ctx = &ctx;
1025
1026retry:
1027 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1028 if (!IS_ERR(crtc_state)) {
1029 struct rcar_du_crtc_state *rcrtc_state;
1030
1031 rcrtc_state = to_rcar_crtc_state(crtc_state);
1032 rcrtc_state->crc.source = source;
1033 rcrtc_state->crc.index = index;
1034
1035 ret = drm_atomic_commit(state);
1036 } else {
1037 ret = PTR_ERR(crtc_state);
1038 }
1039
1040 if (ret == -EDEADLK) {
1041 drm_atomic_state_clear(state);
1042 drm_modeset_backoff(&ctx);
1043 goto retry;
1044 }
1045
1046 drm_atomic_state_put(state);
1047
1048unlock:
1049 drm_modeset_drop_locks(&ctx);
1050 drm_modeset_acquire_fini(&ctx);
1051
1052 return ret;
1053}
1054
1055static const struct drm_crtc_funcs crtc_funcs_gen2 = {
1056 .reset = rcar_du_crtc_reset,
1057 .destroy = drm_crtc_cleanup,
1058 .set_config = drm_atomic_helper_set_config,
1059 .page_flip = drm_atomic_helper_page_flip,
1060 .atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
1061 .atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
1062 .enable_vblank = rcar_du_crtc_enable_vblank,
1063 .disable_vblank = rcar_du_crtc_disable_vblank,
1064};
1065
1066static const struct drm_crtc_funcs crtc_funcs_gen3 = {
1067 .reset = rcar_du_crtc_reset,
1068 .destroy = rcar_du_crtc_cleanup,
1069 .set_config = drm_atomic_helper_set_config,
1070 .page_flip = drm_atomic_helper_page_flip,
1071 .atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
1072 .atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
1073 .enable_vblank = rcar_du_crtc_enable_vblank,
1074 .disable_vblank = rcar_du_crtc_disable_vblank,
1075 .set_crc_source = rcar_du_crtc_set_crc_source,
1076 .verify_crc_source = rcar_du_crtc_verify_crc_source,
1077 .get_crc_sources = rcar_du_crtc_get_crc_sources,
1078};
1079
1080/* -----------------------------------------------------------------------------
1081 * Interrupt Handling
1082 */
1083
1084static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
1085{
1086 struct rcar_du_crtc *rcrtc = arg;
1087 struct rcar_du_device *rcdu = rcrtc->dev;
1088 irqreturn_t ret = IRQ_NONE;
1089 u32 status;
1090
1091 spin_lock(&rcrtc->vblank_lock);
1092
1093 status = rcar_du_crtc_read(rcrtc, DSSR);
1094 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
1095
1096 if (status & DSSR_VBK) {
1097 /*
1098 * Wake up the vblank wait if the counter reaches 0. This must
1099 * be protected by the vblank_lock to avoid races in
1100 * rcar_du_crtc_disable_planes().
1101 */
1102 if (rcrtc->vblank_count) {
1103 if (--rcrtc->vblank_count == 0)
1104 wake_up(&rcrtc->vblank_wait);
1105 }
1106 }
1107
1108 spin_unlock(&rcrtc->vblank_lock);
1109
1110 if (status & DSSR_VBK) {
1111 if (rcdu->info->gen < 3) {
1112 drm_crtc_handle_vblank(&rcrtc->crtc);
1113 rcar_du_crtc_finish_page_flip(rcrtc);
1114 }
1115
1116 ret = IRQ_HANDLED;
1117 }
1118
1119 return ret;
1120}
1121
1122/* -----------------------------------------------------------------------------
1123 * Initialization
1124 */
1125
1126int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
1127 unsigned int hwindex)
1128{
1129 static const unsigned int mmio_offsets[] = {
1130 DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
1131 };
1132
1133 struct rcar_du_device *rcdu = rgrp->dev;
1134 struct platform_device *pdev = to_platform_device(rcdu->dev);
1135 struct rcar_du_crtc *rcrtc = &rcdu->crtcs[swindex];
1136 struct drm_crtc *crtc = &rcrtc->crtc;
1137 struct drm_plane *primary;
1138 unsigned int irqflags;
1139 struct clk *clk;
1140 char clk_name[9];
1141 char *name;
1142 int irq;
1143 int ret;
1144
1145 /* Get the CRTC clock and the optional external clock. */
1146 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
1147 sprintf(clk_name, "du.%u", hwindex);
1148 name = clk_name;
1149 } else {
1150 name = NULL;
1151 }
1152
1153 rcrtc->clock = devm_clk_get(rcdu->dev, name);
1154 if (IS_ERR(rcrtc->clock)) {
1155 dev_err(rcdu->dev, "no clock for DU channel %u\n", hwindex);
1156 return PTR_ERR(rcrtc->clock);
1157 }
1158
1159 sprintf(clk_name, "dclkin.%u", hwindex);
1160 clk = devm_clk_get(rcdu->dev, clk_name);
1161 if (!IS_ERR(clk)) {
1162 rcrtc->extclock = clk;
1163 } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
1164 return -EPROBE_DEFER;
1165 } else if (rcdu->info->dpll_mask & BIT(hwindex)) {
1166 /*
1167 * DU channels that have a display PLL can't use the internal
1168 * system clock and thus require an external clock.
1169 */
1170 ret = PTR_ERR(clk);
1171 dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
1172 return ret;
1173 }
1174
1175 init_waitqueue_head(&rcrtc->flip_wait);
1176 init_waitqueue_head(&rcrtc->vblank_wait);
1177 spin_lock_init(&rcrtc->vblank_lock);
1178
1179 rcrtc->dev = rcdu;
1180 rcrtc->group = rgrp;
1181 rcrtc->mmio_offset = mmio_offsets[hwindex];
1182 rcrtc->index = hwindex;
1183 rcrtc->dsysr = (rcrtc->index % 2 ? 0 : DSYSR_DRES) | DSYSR_TVM_TVSYNC;
1184
1185 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
1186 primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
1187 else
1188 primary = &rgrp->planes[swindex % 2].plane;
1189
1190 ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL,
1191 rcdu->info->gen <= 2 ?
1192 &crtc_funcs_gen2 : &crtc_funcs_gen3,
1193 NULL);
1194 if (ret < 0)
1195 return ret;
1196
1197 drm_crtc_helper_add(crtc, &crtc_helper_funcs);
1198
1199 /* Start with vertical blanking interrupt reporting disabled. */
1200 drm_crtc_vblank_off(crtc);
1201
1202 /* Register the interrupt handler. */
1203 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
1204 /* The IRQs are associated with the CRTC (sw)index. */
1205 irq = platform_get_irq(pdev, swindex);
1206 irqflags = 0;
1207 } else {
1208 irq = platform_get_irq(pdev, 0);
1209 irqflags = IRQF_SHARED;
1210 }
1211
1212 if (irq < 0) {
1213 dev_err(rcdu->dev, "no IRQ for CRTC %u\n", swindex);
1214 return irq;
1215 }
1216
1217 ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
1218 dev_name(rcdu->dev), rcrtc);
1219 if (ret < 0) {
1220 dev_err(rcdu->dev,
1221 "failed to register IRQ for CRTC %u\n", swindex);
1222 return ret;
1223 }
1224
1225 rcar_du_crtc_crc_init(rcrtc);
1226
1227 return 0;
1228}
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * rcar_du_crtc.c -- R-Car Display Unit CRTCs
4 *
5 * Copyright (C) 2013-2015 Renesas Electronics Corporation
6 *
7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
8 */
9
10#include <linux/clk.h>
11#include <linux/mutex.h>
12#include <linux/platform_device.h>
13#include <linux/sys_soc.h>
14
15#include <drm/drm_atomic.h>
16#include <drm/drm_atomic_helper.h>
17#include <drm/drm_bridge.h>
18#include <drm/drm_crtc.h>
19#include <drm/drm_device.h>
20#include <drm/drm_fb_cma_helper.h>
21#include <drm/drm_gem_cma_helper.h>
22#include <drm/drm_plane_helper.h>
23#include <drm/drm_vblank.h>
24
25#include "rcar_cmm.h"
26#include "rcar_du_crtc.h"
27#include "rcar_du_drv.h"
28#include "rcar_du_encoder.h"
29#include "rcar_du_kms.h"
30#include "rcar_du_plane.h"
31#include "rcar_du_regs.h"
32#include "rcar_du_vsp.h"
33#include "rcar_lvds.h"
34
35static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
36{
37 struct rcar_du_device *rcdu = rcrtc->dev;
38
39 return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
40}
41
42static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
43{
44 struct rcar_du_device *rcdu = rcrtc->dev;
45
46 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
47}
48
49static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
50{
51 struct rcar_du_device *rcdu = rcrtc->dev;
52
53 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
54 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
55}
56
57static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
58{
59 struct rcar_du_device *rcdu = rcrtc->dev;
60
61 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
62 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
63}
64
65void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set)
66{
67 struct rcar_du_device *rcdu = rcrtc->dev;
68
69 rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set;
70 rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr);
71}
72
73/* -----------------------------------------------------------------------------
74 * Hardware Setup
75 */
76
77struct dpll_info {
78 unsigned int output;
79 unsigned int fdpll;
80 unsigned int n;
81 unsigned int m;
82};
83
84static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
85 struct dpll_info *dpll,
86 unsigned long input,
87 unsigned long target)
88{
89 unsigned long best_diff = (unsigned long)-1;
90 unsigned long diff;
91 unsigned int fdpll;
92 unsigned int m;
93 unsigned int n;
94
95 /*
96 *    fin                                 fvco    fout           fclkout
97 * in --> [1/M] --> |PD| -> [LPF] -> [VCO] -> [1/P] -+-> [1/FDPLL] -> out
98 *              +-> |  |                             |
99 *              |                                    |
100 *             +---------------- [1/N] <------------+
101 *
102 * fclkout = fvco / P / FDPLL -- (1)
103 *
104 * fin/M = fvco/P/N
105 *
106 * fvco = fin * P * N / M -- (2)
107 *
108 * (1) + (2) indicates
109 *
110 * fclkout = fin * N / M / FDPLL
111 *
112 * NOTES
113 * N : (n + 1)
114 * M : (m + 1)
115 * FDPLL : (fdpll + 1)
116 * P : 2
117 * 2kHz < fvco < 4096MHz
118 *
119 * To minimize the jitter,
120 * N : as large as possible
121 * M : as small as possible
122 */
123 for (m = 0; m < 4; m++) {
124 for (n = 119; n > 38; n--) {
125 /*
126 * This code only runs on 64-bit architectures, so the
127 * unsigned long type can be used for 64-bit
128 * computation. It will still compile without any
129 * warning on 32-bit architectures.
130 *
131 * To optimize calculations, use fout instead of fvco
132 * to verify the VCO frequency constraint.
133 */
134 unsigned long fout = input * (n + 1) / (m + 1);
135
136 if (fout < 1000 || fout > 2048 * 1000 * 1000U)
137 continue;
138
139 for (fdpll = 1; fdpll < 32; fdpll++) {
140 unsigned long output;
141
142 output = fout / (fdpll + 1);
143 if (output >= 400 * 1000 * 1000)
144 continue;
145
146 diff = abs((long)output - (long)target);
147 if (best_diff > diff) {
148 best_diff = diff;
149 dpll->n = n;
150 dpll->m = m;
151 dpll->fdpll = fdpll;
152 dpll->output = output;
153 }
154
155 if (diff == 0)
156 goto done;
157 }
158 }
159 }
160
161done:
162 dev_dbg(rcrtc->dev->dev,
163 "output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
164 dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff);
165}
166
167struct du_clk_params {
168 struct clk *clk;
169 unsigned long rate;
170 unsigned long diff;
171 u32 escr;
172};
173
174static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
175 u32 escr, struct du_clk_params *params)
176{
177 unsigned long rate;
178 unsigned long diff;
179 u32 div;
180
181 /*
182 * If the target rate has already been achieved perfectly we can't do
183 * better.
184 */
185 if (params->diff == 0)
186 return;
187
188 /*
189 * Compute the input clock rate and internal divisor values to obtain
190 * the clock rate closest to the target frequency.
191 */
192 rate = clk_round_rate(clk, target);
193 div = clamp(DIV_ROUND_CLOSEST(rate, target), 1UL, 64UL) - 1;
194 diff = abs(rate / (div + 1) - target);
195
196 /*
197 * Store the parameters if the resulting frequency is better than any
198 * previously calculated value.
199 */
200 if (diff < params->diff) {
201 params->clk = clk;
202 params->rate = rate;
203 params->diff = diff;
204 params->escr = escr | div;
205 }
206}
207
208static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
209 { .soc_id = "r8a7795", .revision = "ES1.*" },
210 { /* sentinel */ }
211};
212
213static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
214{
215 const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
216 struct rcar_du_device *rcdu = rcrtc->dev;
217 unsigned long mode_clock = mode->clock * 1000;
218 u32 dsmr;
219 u32 escr;
220
221 if (rcdu->info->dpll_mask & (1 << rcrtc->index)) {
222 unsigned long target = mode_clock;
223 struct dpll_info dpll = { 0 };
224 unsigned long extclk;
225 u32 dpllcr;
226 u32 div = 0;
227
228 /*
229 * DU channels that have a display PLL can't use the internal
230 * system clock, and have no internal clock divider.
231 */
232
233 /*
234 * The H3 ES1.x exhibits dot clock duty cycle stability issues.
235 * We can work around them by configuring the DPLL to twice the
236 * desired frequency, coupled with a /2 post-divider. Restrict
237 * the workaround to H3 ES1.x as ES2.0 and all other SoCs have
238 * no post-divider when a display PLL is present (as shown by
239 * the workaround breaking HDMI output on M3-W during testing).
240 */
241 if (soc_device_match(rcar_du_r8a7795_es1)) {
242 target *= 2;
243 div = 1;
244 }
245
246 extclk = clk_get_rate(rcrtc->extclock);
247 rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
248
249 dpllcr = DPLLCR_CODE | DPLLCR_CLKE
250 | DPLLCR_FDPLL(dpll.fdpll)
251 | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
252 | DPLLCR_STBY;
253
254 if (rcrtc->index == 1)
255 dpllcr |= DPLLCR_PLCS1
256 | DPLLCR_INCS_DOTCLKIN1;
257 else
258 dpllcr |= DPLLCR_PLCS0
259 | DPLLCR_INCS_DOTCLKIN0;
260
261 rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
262
263 escr = ESCR_DCLKSEL_DCLKIN | div;
264 } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
265 /*
266 * Use the LVDS PLL output as the dot clock when outputting to
267 * the LVDS encoder on an SoC that supports this clock routing
268 * option. We use the clock directly in that case, without any
269 * additional divider.
270 */
271 escr = ESCR_DCLKSEL_DCLKIN;
272 } else {
273 struct du_clk_params params = { .diff = (unsigned long)-1 };
274
275 rcar_du_escr_divider(rcrtc->clock, mode_clock,
276 ESCR_DCLKSEL_CLKS, &params);
277 if (rcrtc->extclock)
278 rcar_du_escr_divider(rcrtc->extclock, mode_clock,
279 ESCR_DCLKSEL_DCLKIN, &params);
280
281 dev_dbg(rcrtc->dev->dev, "mode clock %lu %s rate %lu\n",
282 mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
283 params.rate);
284
285 clk_set_rate(params.clk, params.rate);
286 escr = params.escr;
287 }
288
289 dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
290
291 rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
292 rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
293
294 /* Signal polarities */
295 dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
296 | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
297 | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0)
298 | DSMR_DIPM_DISP | DSMR_CSPM;
299 rcar_du_crtc_write(rcrtc, DSMR, dsmr);
300
301 /* Display timings */
302 rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
303 rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
304 mode->hdisplay - 19);
305 rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
306 mode->hsync_start - 1);
307 rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1);
308
309 rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
310 mode->crtc_vsync_end - 2);
311 rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
312 mode->crtc_vsync_end +
313 mode->crtc_vdisplay - 2);
314 rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
315 mode->crtc_vsync_end +
316 mode->crtc_vsync_start - 1);
317 rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
318
319 rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
320 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
321}
322
323static unsigned int plane_zpos(struct rcar_du_plane *plane)
324{
325 return plane->plane.state->normalized_zpos;
326}
327
328static const struct rcar_du_format_info *
329plane_format(struct rcar_du_plane *plane)
330{
331 return to_rcar_plane_state(plane->plane.state)->format;
332}
333
334static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
335{
336 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
337 struct rcar_du_device *rcdu = rcrtc->dev;
338 unsigned int num_planes = 0;
339 unsigned int dptsr_planes;
340 unsigned int hwplanes = 0;
341 unsigned int prio = 0;
342 unsigned int i;
343 u32 dspr = 0;
344
345 for (i = 0; i < rcrtc->group->num_planes; ++i) {
346 struct rcar_du_plane *plane = &rcrtc->group->planes[i];
347 unsigned int j;
348
349 if (plane->plane.state->crtc != &rcrtc->crtc ||
350 !plane->plane.state->visible)
351 continue;
352
353 /* Insert the plane in the sorted planes array. */
354 for (j = num_planes++; j > 0; --j) {
355 if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
356 break;
357 planes[j] = planes[j-1];
358 }
359
360 planes[j] = plane;
361 prio += plane_format(plane)->planes * 4;
362 }
363
364 for (i = 0; i < num_planes; ++i) {
365 struct rcar_du_plane *plane = planes[i];
366 struct drm_plane_state *state = plane->plane.state;
367 unsigned int index = to_rcar_plane_state(state)->hwindex;
368
369 prio -= 4;
370 dspr |= (index + 1) << prio;
371 hwplanes |= 1 << index;
372
373 if (plane_format(plane)->planes == 2) {
374 index = (index + 1) % 8;
375
376 prio -= 4;
377 dspr |= (index + 1) << prio;
378 hwplanes |= 1 << index;
379 }
380 }
381
382 /* If VSP+DU integration is enabled the plane assignment is fixed. */
383 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
384 if (rcdu->info->gen < 3) {
385 dspr = (rcrtc->index % 2) + 1;
386 hwplanes = 1 << (rcrtc->index % 2);
387 } else {
388 dspr = (rcrtc->index % 2) ? 3 : 1;
389 hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0);
390 }
391 }
392
393 /*
394 * Update the planes to display timing and dot clock generator
395 * associations.
396 *
397 * Updating the DPTSR register requires restarting the CRTC group,
398 * resulting in visible flicker. To mitigate the issue only update the
399 * association if needed by enabled planes. Planes being disabled will
400 * keep their current association.
401 */
402 mutex_lock(&rcrtc->group->lock);
403
404 dptsr_planes = rcrtc->index % 2 ? rcrtc->group->dptsr_planes | hwplanes
405 : rcrtc->group->dptsr_planes & ~hwplanes;
406
407 if (dptsr_planes != rcrtc->group->dptsr_planes) {
408 rcar_du_group_write(rcrtc->group, DPTSR,
409 (dptsr_planes << 16) | dptsr_planes);
410 rcrtc->group->dptsr_planes = dptsr_planes;
411
412 if (rcrtc->group->used_crtcs)
413 rcar_du_group_restart(rcrtc->group);
414 }
415
416 /* Restart the group if plane sources have changed. */
417 if (rcrtc->group->need_restart)
418 rcar_du_group_restart(rcrtc->group);
419
420 mutex_unlock(&rcrtc->group->lock);
421
422 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
423 dspr);
424}
425
426/* -----------------------------------------------------------------------------
427 * Page Flip
428 */
429
430void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
431{
432 struct drm_pending_vblank_event *event;
433 struct drm_device *dev = rcrtc->crtc.dev;
434 unsigned long flags;
435
436 spin_lock_irqsave(&dev->event_lock, flags);
437 event = rcrtc->event;
438 rcrtc->event = NULL;
439 spin_unlock_irqrestore(&dev->event_lock, flags);
440
441 if (event == NULL)
442 return;
443
444 spin_lock_irqsave(&dev->event_lock, flags);
445 drm_crtc_send_vblank_event(&rcrtc->crtc, event);
446 wake_up(&rcrtc->flip_wait);
447 spin_unlock_irqrestore(&dev->event_lock, flags);
448
449 drm_crtc_vblank_put(&rcrtc->crtc);
450}
451
452static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
453{
454 struct drm_device *dev = rcrtc->crtc.dev;
455 unsigned long flags;
456 bool pending;
457
458 spin_lock_irqsave(&dev->event_lock, flags);
459 pending = rcrtc->event != NULL;
460 spin_unlock_irqrestore(&dev->event_lock, flags);
461
462 return pending;
463}
464
465static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
466{
467 struct rcar_du_device *rcdu = rcrtc->dev;
468
469 if (wait_event_timeout(rcrtc->flip_wait,
470 !rcar_du_crtc_page_flip_pending(rcrtc),
471 msecs_to_jiffies(50)))
472 return;
473
474 dev_warn(rcdu->dev, "page flip timeout\n");
475
476 rcar_du_crtc_finish_page_flip(rcrtc);
477}
478
479/* -----------------------------------------------------------------------------
480 * Color Management Module (CMM)
481 */
482
483static int rcar_du_cmm_check(struct drm_crtc *crtc,
484 struct drm_crtc_state *state)
485{
486 struct drm_property_blob *drm_lut = state->gamma_lut;
487 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
488 struct device *dev = rcrtc->dev->dev;
489
490 if (!drm_lut)
491 return 0;
492
493 /* We only accept fully populated LUT tables. */
494 if (drm_color_lut_size(drm_lut) != CM2_LUT_SIZE) {
495 dev_err(dev, "invalid gamma lut size: %zu bytes\n",
496 drm_lut->length);
497 return -EINVAL;
498 }
499
500 return 0;
501}
502
503static void rcar_du_cmm_setup(struct drm_crtc *crtc)
504{
505 struct drm_property_blob *drm_lut = crtc->state->gamma_lut;
506 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
507 struct rcar_cmm_config cmm_config = {};
508
509 if (!rcrtc->cmm)
510 return;
511
512 if (drm_lut)
513 cmm_config.lut.table = (struct drm_color_lut *)drm_lut->data;
514
515 rcar_cmm_setup(rcrtc->cmm, &cmm_config);
516}
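/*
 * Note: the gamma LUT consumed here comes through the standard DRM color
 * management interface. Userspace sets the GAMMA_LUT CRTC property, the
 * resulting blob lands in crtc->state->gamma_lut, and it is then validated
 * by rcar_du_cmm_check() and applied by rcar_du_cmm_setup().
 */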
517
518/* -----------------------------------------------------------------------------
519 * Start/Stop and Suspend/Resume
520 */
521
522static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
523{
524 /* Set display off and background to black */
525 rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
526 rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
527
528 /* Configure display timings and output routing */
529 rcar_du_crtc_set_display_timing(rcrtc);
530 rcar_du_group_set_routing(rcrtc->group);
531
532 /* Start with all planes disabled. */
533 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
534
535 /* Enable the VSP compositor. */
536 if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
537 rcar_du_vsp_enable(rcrtc);
538
539 /* Turn vertical blanking interrupt reporting on. */
540 drm_crtc_vblank_on(&rcrtc->crtc);
541}
542
543static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
544{
545 int ret;
546
547 /*
548 * Guard against double-get, as the function is called from both the
549 * .atomic_enable() and .atomic_begin() handlers.
550 */
551 if (rcrtc->initialized)
552 return 0;
553
554 ret = clk_prepare_enable(rcrtc->clock);
555 if (ret < 0)
556 return ret;
557
558 ret = clk_prepare_enable(rcrtc->extclock);
559 if (ret < 0)
560 goto error_clock;
561
562 ret = rcar_du_group_get(rcrtc->group);
563 if (ret < 0)
564 goto error_group;
565
566 rcar_du_crtc_setup(rcrtc);
567 rcrtc->initialized = true;
568
569 return 0;
570
571error_group:
572 clk_disable_unprepare(rcrtc->extclock);
573error_clock:
574 clk_disable_unprepare(rcrtc->clock);
575 return ret;
576}
577
578static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
579{
580 rcar_du_group_put(rcrtc->group);
581
582 clk_disable_unprepare(rcrtc->extclock);
583 clk_disable_unprepare(rcrtc->clock);
584
585 rcrtc->initialized = false;
586}
587
588static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
589{
590 bool interlaced;
591
592 /*
593 * Select master sync mode. This enables display operation in master
594 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
595 * actively driven).
596 */
597 interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
598 rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
599 (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
600 DSYSR_TVM_MASTER);
601
602 rcar_du_group_start_stop(rcrtc->group, true);
603}
604
605static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
606{
607 struct rcar_du_device *rcdu = rcrtc->dev;
608 struct drm_crtc *crtc = &rcrtc->crtc;
609 u32 status;
610
611 /* Make sure vblank interrupts are enabled. */
612 drm_crtc_vblank_get(crtc);
613
614 /*
615 * Disable planes and calculate how many vertical blanking interrupts we
616 * have to wait for. If a vertical blanking interrupt has been triggered
617 * but not processed yet, we don't know whether it occurred before or
618 * after the planes got disabled. We thus have to wait for two vblank
619 * interrupts in that case.
620 */
621 spin_lock_irq(&rcrtc->vblank_lock);
622 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
623 status = rcar_du_crtc_read(rcrtc, DSSR);
624 rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
625 spin_unlock_irq(&rcrtc->vblank_lock);
626
627 if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
628 msecs_to_jiffies(100)))
629 dev_warn(rcdu->dev, "vertical blanking timeout\n");
630
631 drm_crtc_vblank_put(crtc);
632}
633
634static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
635{
636 struct drm_crtc *crtc = &rcrtc->crtc;
637
638 /*
639 * Disable all planes and wait for the change to take effect. This is
640 * required as the plane enable registers are updated on vblank, and no
641 * vblank will occur once the CRTC is stopped. Disabling planes when
642 * starting the CRTC thus wouldn't be enough as it would start scanning
643 * out immediately from old frame buffers until the next vblank.
644 *
645 * This increases the CRTC stop delay, especially when multiple CRTCs
646 * are stopped in one operation as we now wait for one vblank per CRTC.
647 * Whether this can be improved needs to be researched.
648 */
649 rcar_du_crtc_disable_planes(rcrtc);
650
651 /*
652 * Disable vertical blanking interrupt reporting. We first need to wait
653 * for page flip completion before stopping the CRTC as userspace
654 * expects page flips to eventually complete.
655 */
656 rcar_du_crtc_wait_page_flip(rcrtc);
657 drm_crtc_vblank_off(crtc);
658
659 /* Disable the VSP compositor. */
660 if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
661 rcar_du_vsp_disable(rcrtc);
662
663 if (rcrtc->cmm)
664 rcar_cmm_disable(rcrtc->cmm);
665
666 /*
667 * Select switch sync mode. This stops display operation and configures
668 * the HSYNC and VSYNC signals as inputs.
669 *
670 * TODO: Find another way to stop the display for DUs that don't support
671 * TVM sync.
672 */
673 if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_TVM_SYNC))
674 rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK,
675 DSYSR_TVM_SWITCH);
676
677 rcar_du_group_start_stop(rcrtc->group, false);
678}
679
680/* -----------------------------------------------------------------------------
681 * CRTC Functions
682 */
683
static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									   crtc);
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc_state);
	struct drm_encoder *encoder;
	int ret;

	ret = rcar_du_cmm_check(crtc, crtc_state);
	if (ret)
		return ret;

	/* Store the routes from the CRTC output to the DU outputs. */
	rstate->outputs = 0;

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc_state->encoder_mask) {
		struct rcar_du_encoder *renc;

		/* Skip the writeback encoder. */
		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
			continue;

		renc = to_rcar_encoder(encoder);
		rstate->outputs |= BIT(renc->output);
	}

	return 0;
}

static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
	struct rcar_du_device *rcdu = rcrtc->dev;

	if (rcrtc->cmm)
		rcar_cmm_enable(rcrtc->cmm);
	rcar_du_crtc_get(rcrtc);

	/*
	 * On D3/E3 the dot clock is provided by the LVDS encoder attached to
	 * the DU channel. We need to enable its clock output explicitly if
	 * the LVDS output is disabled.
	 */
	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
		struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
		const struct drm_display_mode *mode =
			&crtc->state->adjusted_mode;

		rcar_lvds_clk_enable(bridge, mode->clock * 1000);
	}

	rcar_du_crtc_start(rcrtc);

	/*
	 * TODO: The chip manual indicates that CMM tables should be written
	 * after the DU channel has been activated. Investigate the impact
	 * of this restriction on the first displayed frame.
	 */
	rcar_du_cmm_setup(crtc);
}

static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
									  crtc);
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_crtc_stop(rcrtc);
	rcar_du_crtc_put(rcrtc);

	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
		struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];

		/*
		 * Disable the LVDS clock output, see
		 * rcar_du_crtc_atomic_enable().
		 */
		rcar_lvds_clk_disable(bridge);
	}

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}

static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	WARN_ON(!crtc->state->enable);

	/*
	 * If a mode set is in progress we can be called with the CRTC disabled.
	 * We thus need to first get and setup the CRTC in order to configure
	 * planes. We must *not* put the CRTC in .atomic_flush(), as it must be
	 * kept awake until the .atomic_enable() call that will follow. The get
	 * operation in .atomic_enable() will in that case be a no-op, and the
	 * CRTC will be put later in .atomic_disable().
	 *
	 * If a mode set is not in progress the CRTC is enabled, and the
	 * following get call will be a no-op. There is thus no need to balance
	 * it in .atomic_flush() either.
	 */
	rcar_du_crtc_get(rcrtc);

	/* If the active state changed, we let .atomic_enable handle CMM. */
	if (crtc->state->color_mgmt_changed && !crtc->state->active_changed)
		rcar_du_cmm_setup(crtc);

	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_atomic_begin(rcrtc);
}

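/*
 * Commit the plane configuration to the hardware and arm the page flip event,
 * if any: the event is moved from the CRTC state to rcrtc->event under the
 * event lock and completed later, once the flip has actually taken place.
 */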
static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	rcar_du_crtc_update_planes(rcrtc);

	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);
		rcrtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_atomic_flush(rcrtc);
}

static enum drm_mode_status
rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
			const struct drm_display_mode *mode)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_device *rcdu = rcrtc->dev;
	bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
	unsigned int vbp;

	if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
		return MODE_NO_INTERLACE;

	/*
	 * The hardware requires a minimum combined horizontal sync and back
	 * porch of 20 pixels and a minimum vertical back porch of 3 lines.
	 */
	if (mode->htotal - mode->hsync_start < 20)
		return MODE_HBLANK_NARROW;

	vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
	if (vbp < 3)
		return MODE_VBLANK_NARROW;

	return MODE_OK;
}

static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
	.atomic_check = rcar_du_crtc_atomic_check,
	.atomic_begin = rcar_du_crtc_atomic_begin,
	.atomic_flush = rcar_du_crtc_atomic_flush,
	.atomic_enable = rcar_du_crtc_atomic_enable,
	.atomic_disable = rcar_du_crtc_atomic_disable,
	.mode_valid = rcar_du_crtc_mode_valid,
};

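/*
 * Build the list of CRC sources exposed through debugfs: the "auto" source
 * (CRC computed on the VSP output) plus one "plane<id>" entry per VSP plane.
 * CRC capture is only available with the VSP compositor on Gen3 hardware.
 */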
static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->dev;
	const char **sources;
	unsigned int count;
	int i = -1;

	/* CRC available only on Gen3 HW. */
	if (rcdu->info->gen < 3)
		return;

	/* Reserve 1 for "auto" source. */
	count = rcrtc->vsp->num_planes + 1;

	sources = kmalloc_array(count, sizeof(*sources), GFP_KERNEL);
	if (!sources)
		return;

	sources[0] = kstrdup("auto", GFP_KERNEL);
	if (!sources[0])
		goto error;

	for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
		struct drm_plane *plane = &rcrtc->vsp->planes[i].plane;
		char name[16];

		sprintf(name, "plane%u", plane->base.id);
		sources[i + 1] = kstrdup(name, GFP_KERNEL);
		if (!sources[i + 1])
			goto error;
	}

	rcrtc->sources = sources;
	rcrtc->sources_count = count;
	return;

error:
	while (i >= 0) {
		kfree(sources[i]);
		i--;
	}
	kfree(sources);
}

static void rcar_du_crtc_crc_cleanup(struct rcar_du_crtc *rcrtc)
{
	unsigned int i;

	if (!rcrtc->sources)
		return;

	for (i = 0; i < rcrtc->sources_count; i++)
		kfree(rcrtc->sources[i]);
	kfree(rcrtc->sources);

	rcrtc->sources = NULL;
	rcrtc->sources_count = 0;
}

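/*
 * Duplicate the full driver-specific CRTC state, including the output routing
 * and CRC configuration, rather than just the DRM core state.
 */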
static struct drm_crtc_state *
rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
	struct rcar_du_crtc_state *state;
	struct rcar_du_crtc_state *copy;

	if (WARN_ON(!crtc->state))
		return NULL;

	state = to_rcar_crtc_state(crtc->state);
	copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
	if (copy == NULL)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &copy->state);

	return &copy->state;
}

static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
					      struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_rcar_crtc_state(state));
}

static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_crc_cleanup(rcrtc);

	return drm_crtc_cleanup(crtc);
}

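/*
 * Reset the CRTC to its default software state: free any existing state and
 * allocate a fresh one with CRC computation disabled.
 */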
static void rcar_du_crtc_reset(struct drm_crtc *crtc)
{
	struct rcar_du_crtc_state *state;

	if (crtc->state) {
		rcar_du_crtc_atomic_destroy_state(crtc, crtc->state);
		crtc->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return;

	state->crc.source = VSP1_DU_CRC_NONE;
	state->crc.index = 0;

	__drm_atomic_helper_crtc_reset(crtc, &state->state);
}

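/*
 * Enable vblank reporting: clear any previously latched vblank status in
 * DSRCR before unmasking the vblank interrupt in DIER.
 */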
static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
	rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
	rcrtc->vblank_enable = true;

	return 0;
}

static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
	rcrtc->vblank_enable = false;
}

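/*
 * Parse a debugfs CRC source name. Returns the index of the matching VSP
 * plane for "plane%u" sources, 0 for a NULL or "auto" source, or a negative
 * error code if the name doesn't match any supported source.
 */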
static int rcar_du_crtc_parse_crc_source(struct rcar_du_crtc *rcrtc,
					  const char *source_name,
					  enum vsp1_du_crc_source *source)
{
	unsigned int index;
	int ret;

	/*
	 * Parse the source name. Supported values are "plane%u" to compute the
	 * CRC on an input plane (%u is the plane ID), and "auto" to compute the
	 * CRC on the composer (VSP) output.
	 */

	if (!source_name) {
		*source = VSP1_DU_CRC_NONE;
		return 0;
	} else if (!strcmp(source_name, "auto")) {
		*source = VSP1_DU_CRC_OUTPUT;
		return 0;
	} else if (strstarts(source_name, "plane")) {
		unsigned int i;

		*source = VSP1_DU_CRC_PLANE;

		ret = kstrtouint(source_name + strlen("plane"), 10, &index);
		if (ret < 0)
			return ret;

		for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
			if (index == rcrtc->vsp->planes[i].plane.base.id)
				return i;
		}
	}

	return -EINVAL;
}

static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
					  const char *source_name,
					  size_t *values_cnt)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	enum vsp1_du_crc_source source;

	if (rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
		return -EINVAL;
	}

	*values_cnt = 1;
	return 0;
}

static const char *const *
rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	*count = rcrtc->sources_count;
	return rcrtc->sources;
}

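/*
 * Set the CRC source by storing it in the driver-specific CRTC state and
 * applying it through a regular atomic commit, retried with
 * drm_modeset_backoff() when the commit returns -EDEADLK.
 */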
static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
					const char *source_name)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc_state *crtc_state;
	struct drm_atomic_state *state;
	enum vsp1_du_crc_source source;
	unsigned int index;
	int ret;

	ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
	if (ret < 0)
		return ret;

	index = ret;

	/* Perform an atomic commit to set the CRC source. */
	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		ret = -ENOMEM;
		goto unlock;
	}

	state->acquire_ctx = &ctx;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (!IS_ERR(crtc_state)) {
		struct rcar_du_crtc_state *rcrtc_state;

		rcrtc_state = to_rcar_crtc_state(crtc_state);
		rcrtc_state->crc.source = source;
		rcrtc_state->crc.index = index;

		ret = drm_atomic_commit(state);
	} else {
		ret = PTR_ERR(crtc_state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

unlock:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static const struct drm_crtc_funcs crtc_funcs_gen2 = {
	.reset = rcar_du_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
	.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
	.enable_vblank = rcar_du_crtc_enable_vblank,
	.disable_vblank = rcar_du_crtc_disable_vblank,
};

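/*
 * Gen3 additionally exposes the debugfs CRC interface, and therefore uses
 * rcar_du_crtc_cleanup() as the destroy hook to free the CRC source names.
 */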
static const struct drm_crtc_funcs crtc_funcs_gen3 = {
	.reset = rcar_du_crtc_reset,
	.destroy = rcar_du_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
	.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
	.enable_vblank = rcar_du_crtc_enable_vblank,
	.disable_vblank = rcar_du_crtc_disable_vblank,
	.set_crc_source = rcar_du_crtc_set_crc_source,
	.verify_crc_source = rcar_du_crtc_verify_crc_source,
	.get_crc_sources = rcar_du_crtc_get_crc_sources,
};

/* -----------------------------------------------------------------------------
 * Interrupt Handling
 */

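/*
 * CRTC interrupt handler. Clear the latched status bits, update the
 * disable-planes vblank countdown under vblank_lock, and, on Gen2 only,
 * report the vblank and complete any pending page flip (on Gen3 this is
 * likely handled from the VSP completion path instead).
 */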
static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
	struct rcar_du_crtc *rcrtc = arg;
	struct rcar_du_device *rcdu = rcrtc->dev;
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	spin_lock(&rcrtc->vblank_lock);

	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);

	if (status & DSSR_VBK) {
		/*
		 * Wake up the vblank wait if the counter reaches 0. This must
		 * be protected by the vblank_lock to avoid races in
		 * rcar_du_crtc_disable_planes().
		 */
		if (rcrtc->vblank_count) {
			if (--rcrtc->vblank_count == 0)
				wake_up(&rcrtc->vblank_wait);
		}
	}

	spin_unlock(&rcrtc->vblank_lock);

	if (status & DSSR_VBK) {
		if (rcdu->info->gen < 3) {
			drm_crtc_handle_vblank(&rcrtc->crtc);
			rcar_du_crtc_finish_page_flip(rcrtc);
		}

		ret = IRQ_HANDLED;
	}

	return ret;
}

/* -----------------------------------------------------------------------------
 * Initialization
 */

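/*
 * Create and initialize one CRTC: acquire the functional and optional external
 * clocks, pick the primary plane (VSP plane or native DU plane), register the
 * CRTC with the DRM core, optionally wire up the CMM, and request the vblank
 * interrupt. swindex is the software (DRM) index, hwindex the DU channel.
 */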
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
			unsigned int hwindex)
{
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
	};

	struct rcar_du_device *rcdu = rgrp->dev;
	struct platform_device *pdev = to_platform_device(rcdu->dev);
	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[swindex];
	struct drm_crtc *crtc = &rcrtc->crtc;
	struct drm_plane *primary;
	unsigned int irqflags;
	struct clk *clk;
	char clk_name[9];
	char *name;
	int irq;
	int ret;

	/* Get the CRTC clock and the optional external clock. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
		sprintf(clk_name, "du.%u", hwindex);
		name = clk_name;
	} else {
		name = NULL;
	}

	rcrtc->clock = devm_clk_get(rcdu->dev, name);
	if (IS_ERR(rcrtc->clock)) {
		dev_err(rcdu->dev, "no clock for DU channel %u\n", hwindex);
		return PTR_ERR(rcrtc->clock);
	}

	sprintf(clk_name, "dclkin.%u", hwindex);
	clk = devm_clk_get(rcdu->dev, clk_name);
	if (!IS_ERR(clk)) {
		rcrtc->extclock = clk;
	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
		return -EPROBE_DEFER;
	} else if (rcdu->info->dpll_mask & BIT(hwindex)) {
		/*
		 * DU channels that have a display PLL can't use the internal
		 * system clock and thus require an external clock.
		 */
		ret = PTR_ERR(clk);
		dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
		return ret;
	}

	init_waitqueue_head(&rcrtc->flip_wait);
	init_waitqueue_head(&rcrtc->vblank_wait);
	spin_lock_init(&rcrtc->vblank_lock);

	rcrtc->dev = rcdu;
	rcrtc->group = rgrp;
	rcrtc->mmio_offset = mmio_offsets[hwindex];
	rcrtc->index = hwindex;
	rcrtc->dsysr = (rcrtc->index % 2 ? 0 : DSYSR_DRES) | DSYSR_TVM_TVSYNC;

	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
		primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
	else
		primary = &rgrp->planes[swindex % 2].plane;

	ret = drm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL,
					rcdu->info->gen <= 2 ?
					&crtc_funcs_gen2 : &crtc_funcs_gen3,
					NULL);
	if (ret < 0)
		return ret;

	/* CMM might be disabled for this CRTC. */
	if (rcdu->cmms[swindex]) {
		rcrtc->cmm = rcdu->cmms[swindex];
		rgrp->cmms_mask |= BIT(hwindex % 2);

		drm_mode_crtc_set_gamma_size(crtc, CM2_LUT_SIZE);
		drm_crtc_enable_color_mgmt(crtc, 0, false, CM2_LUT_SIZE);
	}

	drm_crtc_helper_add(crtc, &crtc_helper_funcs);

	/* Register the interrupt handler. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
		/* The IRQs are associated with the CRTC (sw) index. */
		irq = platform_get_irq(pdev, swindex);
		irqflags = 0;
	} else {
		irq = platform_get_irq(pdev, 0);
		irqflags = IRQF_SHARED;
	}

	if (irq < 0) {
		dev_err(rcdu->dev, "no IRQ for CRTC %u\n", swindex);
		return irq;
	}

	ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
			       dev_name(rcdu->dev), rcrtc);
	if (ret < 0) {
		dev_err(rcdu->dev,
			"failed to register IRQ for CRTC %u\n", swindex);
		return ret;
	}

	rcar_du_crtc_crc_init(rcrtc);

	return 0;
}