// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>

#include "drm.h"
#include "dc.h"
#include "plane.h"

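/*
 * Number of fractional bits in the fixed-point representation used for the
 * scaler phase increment and start phase values programmed below.
 */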
#define NFB 24

static const u32 tegra_shared_plane_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	/* new on Tegra114 */
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_BGRA5551,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGBX5551,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_BGRX5551,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	/* planar formats */
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YUV422,
};

static const u64 tegra_shared_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	/*
	 * The GPU sector layout is only supported on Tegra194, but these will
	 * be filtered out later on by ->format_mod_supported() on SoCs where
	 * it isn't supported.
	 */
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	/* sentinel */
	DRM_FORMAT_MOD_INVALID
};

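/*
 * Shared window registers are accessed through a per-window aperture: this
 * helper remaps the architectural offsets (the 0x500, 0x700 and 0x800
 * ranges) to offsets relative to the window's aperture base.
 */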
static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
					      unsigned int offset)
{
	if (offset >= 0x500 && offset <= 0x581) {
		offset = 0x000 + (offset - 0x500);
		return plane->offset + offset;
	}

	if (offset >= 0x700 && offset <= 0x73c) {
		offset = 0x180 + (offset - 0x700);
		return plane->offset + offset;
	}

	if (offset >= 0x800 && offset <= 0x83e) {
		offset = 0x1c0 + (offset - 0x800);
		return plane->offset + offset;
	}

	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);

	return plane->offset + offset;
}

static inline u32 tegra_plane_readl(struct tegra_plane *plane,
				    unsigned int offset)
{
	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}

static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
				      unsigned int offset)
{
	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}

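/*
 * Window groups are reference counted: the first user resumes the parent
 * display controller via host1x and takes the group out of reset, the last
 * user puts it back into reset and suspends the controller again.
 */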
static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
{
	int err = 0;

	mutex_lock(&wgrp->lock);

	if (wgrp->usecount == 0) {
		err = host1x_client_resume(wgrp->parent);
		if (err < 0) {
			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
			goto unlock;
		}

		reset_control_deassert(wgrp->rst);
	}

	wgrp->usecount++;

unlock:
	mutex_unlock(&wgrp->lock);
	return err;
}

static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
{
	int err;

	mutex_lock(&wgrp->lock);

	if (wgrp->usecount == 1) {
		err = reset_control_assert(wgrp->rst);
		if (err < 0) {
			pr_err("failed to assert reset for window group %u\n",
			       wgrp->index);
		}

		host1x_client_suspend(wgrp->parent);
	}

	wgrp->usecount--;
	mutex_unlock(&wgrp->lock);
}

int tegra_display_hub_prepare(struct tegra_display_hub *hub)
{
	unsigned int i;

	/*
	 * XXX Enabling/disabling windowgroups needs to happen when the owner
	 * display controller is disabled. There's currently no good point at
	 * which this could be executed, so unconditionally enable all window
	 * groups for now.
	 */
	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];

		/* Skip orphaned window group whose parent DC is disabled */
		if (wgrp->parent)
			tegra_windowgroup_enable(wgrp);
	}

	return 0;
}

void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
{
	unsigned int i;

	/*
	 * XXX Remove this once window groups can be more fine-grainedly
	 * enabled and disabled.
	 */
	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];

		/* Skip orphaned window group whose parent DC is disabled */
		if (wgrp->parent)
			tegra_windowgroup_disable(wgrp);
	}
}

static void tegra_shared_plane_update(struct tegra_plane *plane)
{
	struct tegra_dc *dc = plane->dc;
	unsigned long timeout;
	u32 mask, value;

	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);

	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
		if ((value & mask) == 0)
			break;

		usleep_range(100, 400);
	}
}

static void tegra_shared_plane_activate(struct tegra_plane *plane)
{
	struct tegra_dc *dc = plane->dc;
	unsigned long timeout;
	u32 mask, value;

	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);

	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
		if ((value & mask) == 0)
			break;

		usleep_range(100, 400);
	}
}

static unsigned int
tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);

	return tegra_dc_readl(dc, offset) & OWNER_MASK;
}

static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
				       struct tegra_plane *plane)
{
	struct device *dev = dc->dev;

	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
		if (plane->dc == dc)
			return true;

		dev_WARN(dev, "head %u owns window %u but is not attached\n",
			 dc->pipe, plane->index);
	}

	return false;
}

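/*
 * Window ownership is tracked in hardware: OWNER_MASK marks a window as
 * unowned. Passing a NULL display controller detaches the window, otherwise
 * it is handed over to the given head, and the transfer is refused if a
 * different head still owns it.
 */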
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
					struct tegra_dc *new)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
	struct device *dev = new ? new->dev : old->dev;
	unsigned int owner, index = plane->index;
	u32 value;

	value = tegra_dc_readl(dc, offset);
	owner = value & OWNER_MASK;

	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
		return -EBUSY;
	}

	/*
	 * This seems to happen whenever the head has been disabled with one
	 * or more windows being active. This is harmless because we'll just
	 * reassign the window to the new head anyway.
	 */
	if (old && owner == OWNER_MASK)
		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
			old->pipe, owner);

	value &= ~OWNER_MASK;

	if (new)
		value |= OWNER(new->pipe);
	else
		value |= OWNER_MASK;

	tegra_dc_writel(dc, value, offset);

	plane->dc = new;

	return 0;
}

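/*
 * Program the input scaler filter coefficients: 192 packed register values,
 * organized as 3 x 16 x 4 entries and written through the indexed COEFF
 * register of the window group.
 */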
static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
{
	static const unsigned int coeffs[192] = {
		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
	};
	unsigned int ratio, row, column;

	for (ratio = 0; ratio <= 2; ratio++) {
		for (row = 0; row <= 15; row++) {
			for (column = 0; column <= 3; column++) {
				unsigned int index = (ratio << 6) + (row << 2) + column;
				u32 value;

				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
				tegra_plane_writel(plane, value,
						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
			}
		}
	}
}

static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	u32 value;
	int err;

	if (!tegra_dc_owns_shared_plane(dc, plane)) {
		err = tegra_shared_plane_set_owner(plane, dc);
		if (err < 0)
			return;
	}

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
	value |= MODE_FOUR_LINES;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
	value = SLOTS(1);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

	/* disable watermark */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
	value &= ~LATENCY_CTL_MODE_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
	value |= WATERMARK_MASK;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

	/* pipe meter */
	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

	/* mempool entries */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
	value = MEMPOOL_ENTRIES(0x331);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
	value &= ~THREAD_NUM_MASK;
	value |= THREAD_NUM(plane->base.index);
	value |= THREAD_GROUP_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

	tegra_shared_plane_setup_scaler(plane);

	tegra_shared_plane_update(plane);
	tegra_shared_plane_activate(plane);
}

static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	tegra_shared_plane_set_owner(plane, NULL);
}

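/*
 * Validate the requested plane state: translate the framebuffer format,
 * check the tiling mode and sector layout against the SoC's capabilities
 * and reject framebuffers whose U and V planes use different strides.
 */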
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										  plane);
	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
	struct tegra_bo_tiling *tiling = &plane_state->tiling;
	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
	int err;

	/* no need for further checks if the plane is being disabled */
	if (!new_plane_state->crtc || !new_plane_state->fb)
		return 0;

	err = tegra_plane_format(new_plane_state->fb->format->format,
				 &plane_state->format,
				 &plane_state->swap);
	if (err < 0)
		return err;

	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
	if (err < 0)
		return err;

	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
	    !dc->soc->supports_block_linear) {
		DRM_ERROR("hardware doesn't support block linear mode\n");
		return -EINVAL;
	}

	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
	    !dc->soc->supports_sector_layout) {
		DRM_ERROR("hardware doesn't support GPU sector layout\n");
		return -EINVAL;
	}

	/*
	 * Tegra doesn't support different strides for U and V planes so we
	 * error out if the user tries to display a framebuffer with such a
	 * configuration.
	 */
	if (new_plane_state->fb->format->num_planes > 2) {
		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
			DRM_ERROR("unsupported UV-plane configuration\n");
			return -EINVAL;
		}
	}

	/* XXX scaling is not yet supported, add a check here */

	err = tegra_plane_state_add(&tegra->base, new_plane_state);
	if (err < 0)
		return err;

	return 0;
}

static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_dc *dc;
	u32 value;
	int err;

	/* rien ne va plus */
	if (!old_state || !old_state->crtc)
		return;

	dc = to_tegra_dc(old_state->crtc);

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/*
	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
	 * on planes that are already disabled. Make sure we fallback to the
	 * head for this particular state instead of crashing.
	 */
	if (WARN_ON(p->dc == NULL))
		p->dc = dc;

	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
	value &= ~WIN_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	tegra_dc_remove_shared_plane(dc, p);

	host1x_client_suspend(&dc->client);
}

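/*
 * Compute the scaler phase increment as the source/destination size ratio
 * in fixed-point format with NFB fractional bits, rounded to nearest.
 */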
static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
{
	u64 tmp, tmp1, tmp2;

	tmp = (u64)dfixed_trunc(in);
	tmp2 = (u64)out;
	tmp1 = (tmp << NFB) + (tmp2 >> 1);
	do_div(tmp1, tmp2);

	return lower_32_bits(tmp1);
}

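/*
 * Program a shared window for scanout: claim it for the new CRTC's head,
 * then set up blending, scaling, the framebuffer addresses and the surface
 * layout before releasing the display controller again.
 */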
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
					     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									    plane);
	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
	unsigned int zpos = new_state->normalized_zpos;
	struct drm_framebuffer *fb = new_state->fb;
	struct tegra_plane *p = to_tegra_plane(plane);
	u32 value, min_width, bypass = 0;
	dma_addr_t base, addr_flag = 0;
	unsigned int bpc;
	bool yuv, planar;
	int err;

	/* rien ne va plus */
	if (!new_state->crtc || !new_state->fb)
		return;

	if (!new_state->visible) {
		tegra_shared_plane_atomic_disable(plane, state);
		return;
	}

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planar, &bpc);

	tegra_dc_assign_shared_plane(dc, p);

	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

	/* blending */
	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

	/* scaling */
	min_width = min(new_state->src_w >> 16, new_state->crtc_w);

	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);

	if (min_width < MAX_PIXELS_5TAP444(value)) {
		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	} else {
		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);

		if (min_width < MAX_PIXELS_2TAP444(value))
			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
		else
			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
	}

	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

	if (new_state->src_w != new_state->crtc_w << 16) {
		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_HBYPASS;
	}

	if (new_state->src_h != new_state->crtc_h << 16) {
		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_VBYPASS;
	}

	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

	/* disable compression */
	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/*
	 * Physical address bit 39 in Tegra194 is used as a switch for special
	 * logic that swizzles the memory using either the legacy Tegra or the
	 * dGPU sector layout.
	 */
	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
		addr_flag = BIT_ULL(39);
#endif

	base = tegra_plane_state->iova[0] + fb->offsets[0];
	base |= addr_flag;

	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

	value = V_POSITION(new_state->crtc_y) |
		H_POSITION(new_state->crtc_x);
	tegra_plane_writel(p, value, DC_WIN_POSITION);

	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_SIZE);

	value = WIN_ENABLE | COLOR_EXPAND;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

	value = PITCH(fb->pitches[0]);
	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

	if (yuv && planar) {
		base = tegra_plane_state->iova[1] + fb->offsets[1];
		base |= addr_flag;

		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);

		base = tegra_plane_state->iova[2] + fb->offsets[2];
		base |= addr_flag;

		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);

		value = PITCH_U(fb->pitches[2]) | PITCH_V(fb->pitches[2]);
		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
	} else {
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
	}

	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;

	if (yuv) {
		if (bpc < 12)
			value |= DEGAMMA_YUV8_10;
		else
			value |= DEGAMMA_YUV12;

		/* XXX parameterize */
		value |= COLOR_SPACE_YUV_2020;
	} else {
		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
			value |= DEGAMMA_SRGB;
	}

	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

	value = OFFSET_X(new_state->src_y >> 16) |
		OFFSET_Y(new_state->src_x >> 16);
	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

	if (dc->soc->supports_block_linear) {
		unsigned long height = tegra_plane_state->tiling.value;

		/* XXX */
		switch (tegra_plane_state->tiling.mode) {
		case TEGRA_BO_TILING_MODE_PITCH:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
				DC_WINBUF_SURFACE_KIND_PITCH;
			break;

		/* XXX not supported on Tegra186 and later */
		case TEGRA_BO_TILING_MODE_TILED:
			value = DC_WINBUF_SURFACE_KIND_TILED;
			break;

		case TEGRA_BO_TILING_MODE_BLOCK:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
				DC_WINBUF_SURFACE_KIND_BLOCK;
			break;
		}

		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
	}

	/* disable gamut CSC */
	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
	value &= ~CONTROL_CSC_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

	host1x_client_suspend(&dc->client);
}

static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
	.prepare_fb = tegra_plane_prepare_fb,
	.cleanup_fb = tegra_plane_cleanup_fb,
	.atomic_check = tegra_shared_plane_atomic_check,
	.atomic_update = tegra_shared_plane_atomic_update,
	.atomic_disable = tegra_shared_plane_atomic_disable,
};

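/*
 * Shared planes are implemented by the display hub rather than by an
 * individual display controller: each occupies a 0x300-register window
 * aperture starting at 0x0a00 and is backed by one of the hub's window
 * groups, which allows it to be reassigned between heads at runtime.
 */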
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
					    struct tegra_dc *dc,
					    unsigned int wgrp,
					    unsigned int index)
{
	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_shared_plane *plane;
	unsigned int possible_crtcs;
	unsigned int num_formats;
	const u64 *modifiers;
	struct drm_plane *p;
	const u32 *formats;
	int err;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	plane->base.offset = 0x0a00 + 0x0300 * index;
	plane->base.index = index;

	plane->wgrp = &hub->wgrps[wgrp];
	plane->wgrp->parent = &dc->client;

	p = &plane->base.base;

	/* planes can be assigned to arbitrary CRTCs */
	possible_crtcs = BIT(tegra->num_crtcs) - 1;

	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
	formats = tegra_shared_plane_formats;
	modifiers = tegra_shared_plane_modifiers;

	err = drm_universal_plane_init(drm, p, possible_crtcs,
				       &tegra_plane_funcs, formats,
				       num_formats, modifiers, type, NULL);
	if (err < 0) {
		kfree(plane);
		return ERR_PTR(err);
	}

	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
	drm_plane_create_zpos_property(p, 0, 0, 255);

	return p;
}

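/*
 * The hub carries global atomic state (which display clock feeds the hub
 * and at what rate) that does not belong to any single CRTC, so it is
 * modelled as a DRM private object.
 */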
static struct drm_private_state *
tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
{
	struct tegra_display_hub_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
					    struct drm_private_state *state)
{
	struct tegra_display_hub_state *hub_state =
		to_tegra_display_hub_state(state);

	kfree(hub_state);
}

static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
	.atomic_destroy_state = tegra_display_hub_destroy_state,
};

static struct tegra_display_hub_state *
tegra_display_hub_get_state(struct tegra_display_hub *hub,
			    struct drm_atomic_state *state)
{
	struct drm_private_state *priv;

	priv = drm_atomic_get_private_obj_state(state, &hub->base);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	return to_tegra_display_hub_state(priv);
}

int tegra_display_hub_atomic_check(struct drm_device *drm,
				   struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *hub_state;
	struct drm_crtc_state *old, *new;
	struct drm_crtc *crtc;
	unsigned int i;

	if (!tegra->hub)
		return 0;

	hub_state = tegra_display_hub_get_state(tegra->hub, state);
	if (IS_ERR(hub_state))
		return PTR_ERR(hub_state);

	/*
	 * The display hub display clock needs to be fed by the display clock
	 * with the highest frequency to ensure proper functioning of all the
	 * displays.
	 *
	 * Note that this isn't used before Tegra186, but it doesn't hurt and
	 * conditionalizing it would make the code less clean.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
		struct tegra_dc_state *dc = to_dc_state(new);

		if (new->active) {
			if (!hub_state->clk || dc->pclk > hub_state->rate) {
				hub_state->dc = to_tegra_dc(dc->base.crtc);
				hub_state->clk = hub_state->dc->clk;
				hub_state->rate = dc->pclk;
			}
		}
	}

	return 0;
}

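/*
 * Latch the common (non-window) state into the hardware. The read-back of
 * DC_CMD_STATE_CONTROL after each write presumably acts as a flush so that
 * the request has reached the display controller before the next one is
 * issued.
 */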
static void tegra_display_hub_update(struct tegra_dc *dc)
{
	u32 value;
	int err;

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
	value &= ~LATENCY_EVENT;
	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	host1x_client_suspend(&dc->client);
}

void tegra_display_hub_atomic_commit(struct drm_device *drm,
				     struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_display_hub_state *hub_state;
	struct device *dev = hub->client.dev;
	int err;

	hub_state = to_tegra_display_hub_state(hub->base.state);

	if (hub_state->clk) {
		err = clk_set_rate(hub_state->clk, hub_state->rate);
		if (err < 0)
			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
				hub_state->clk, hub_state->rate);

		err = clk_set_parent(hub->clk_disp, hub_state->clk);
		if (err < 0)
			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
				hub->clk_disp, hub_state->clk, err);
	}

	if (hub_state->dc)
		tegra_display_hub_update(hub_state->dc);
}

static int tegra_display_hub_init(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
				    &tegra_display_hub_state_funcs);

	tegra->hub = hub;

	return 0;
}

static int tegra_display_hub_exit(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_private_obj_fini(&tegra->hub->base);
	tegra->hub = NULL;

	return 0;
}

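/*
 * These host1x client suspend/resume callbacks gate the hub clocks, the
 * "misc" reset and the device's runtime PM state around actual use of the
 * hardware.
 */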
static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i = hub->num_heads;
	int err;

	err = reset_control_assert(hub->rst);
	if (err < 0)
		return err;

	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
	clk_disable_unprepare(hub->clk_dsc);
	clk_disable_unprepare(hub->clk_disp);

	pm_runtime_put_sync(dev);

	return 0;
}

static int tegra_display_hub_runtime_resume(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i;
	int err;

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(hub->clk_disp);
	if (err < 0)
		goto put_rpm;

	err = clk_prepare_enable(hub->clk_dsc);
	if (err < 0)
		goto disable_disp;

	err = clk_prepare_enable(hub->clk_hub);
	if (err < 0)
		goto disable_dsc;

	for (i = 0; i < hub->num_heads; i++) {
		err = clk_prepare_enable(hub->clk_heads[i]);
		if (err < 0)
			goto disable_heads;
	}

	err = reset_control_deassert(hub->rst);
	if (err < 0)
		goto disable_heads;

	return 0;

disable_heads:
	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
disable_dsc:
	clk_disable_unprepare(hub->clk_dsc);
disable_disp:
	clk_disable_unprepare(hub->clk_disp);
put_rpm:
	pm_runtime_put_sync(dev);
	return err;
}

static const struct host1x_client_ops tegra_display_hub_ops = {
	.init = tegra_display_hub_init,
	.exit = tegra_display_hub_exit,
	.suspend = tegra_display_hub_runtime_suspend,
	.resume = tegra_display_hub_runtime_resume,
};

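/*
 * Probe acquires the hub clocks and resets (one reset per window group and
 * one "dc" clock per head) and registers the hub as a host1x client; the
 * DMA mask is inherited from the parent host1x device.
 */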
static int tegra_display_hub_probe(struct platform_device *pdev)
{
	u64 dma_mask = dma_get_mask(pdev->dev.parent);
	struct device_node *child = NULL;
	struct tegra_display_hub *hub;
	struct clk *clk;
	unsigned int i;
	int err;

	err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
	if (!hub)
		return -ENOMEM;

	hub->soc = of_device_get_match_data(&pdev->dev);

	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
	if (IS_ERR(hub->clk_disp)) {
		err = PTR_ERR(hub->clk_disp);
		return err;
	}

	if (hub->soc->supports_dsc) {
		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
		if (IS_ERR(hub->clk_dsc)) {
			err = PTR_ERR(hub->clk_dsc);
			return err;
		}
	}

	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
	if (IS_ERR(hub->clk_hub)) {
		err = PTR_ERR(hub->clk_hub);
		return err;
	}

	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
	if (IS_ERR(hub->rst)) {
		err = PTR_ERR(hub->rst);
		return err;
	}

	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
				  sizeof(*hub->wgrps), GFP_KERNEL);
	if (!hub->wgrps)
		return -ENOMEM;

	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
		char id[8];

		snprintf(id, sizeof(id), "wgrp%u", i);
		mutex_init(&wgrp->lock);
		wgrp->usecount = 0;
		wgrp->index = i;

		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
		if (IS_ERR(wgrp->rst))
			return PTR_ERR(wgrp->rst);

		err = reset_control_assert(wgrp->rst);
		if (err < 0)
			return err;
	}

	hub->num_heads = of_get_child_count(pdev->dev.of_node);

	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
				      GFP_KERNEL);
	if (!hub->clk_heads)
		return -ENOMEM;

	for (i = 0; i < hub->num_heads; i++) {
		child = of_get_next_child(pdev->dev.of_node, child);
		if (!child) {
			dev_err(&pdev->dev, "failed to find node for head %u\n",
				i);
			return -ENODEV;
		}

		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
		if (IS_ERR(clk)) {
			dev_err(&pdev->dev, "failed to get clock for head %u\n",
				i);
			of_node_put(child);
			return PTR_ERR(clk);
		}

		hub->clk_heads[i] = clk;
	}

	of_node_put(child);

	/* XXX: enable clock across reset? */
	err = reset_control_assert(hub->rst);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, hub);
	pm_runtime_enable(&pdev->dev);

	INIT_LIST_HEAD(&hub->client.list);
	hub->client.ops = &tegra_display_hub_ops;
	hub->client.dev = &pdev->dev;

	err = host1x_client_register(&hub->client);
	if (err < 0)
		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
			err);

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return err;

unregister:
	host1x_client_unregister(&hub->client);
	pm_runtime_disable(&pdev->dev);
	return err;
}

static int tegra_display_hub_remove(struct platform_device *pdev)
{
	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
	unsigned int i;
	int err;

	err = host1x_client_unregister(&hub->client);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
	}

	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];

		mutex_destroy(&wgrp->lock);
	}

	pm_runtime_disable(&pdev->dev);

	return err;
}

static const struct tegra_display_hub_soc tegra186_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = true,
};

static const struct tegra_display_hub_soc tegra194_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = false,
};

static const struct of_device_id tegra_display_hub_of_match[] = {
	{
		.compatible = "nvidia,tegra194-display",
		.data = &tegra194_display_hub
	}, {
		.compatible = "nvidia,tegra186-display",
		.data = &tegra186_display_hub
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);

struct platform_driver tegra_display_hub_driver = {
	.driver = {
		.name = "tegra-display-hub",
		.of_match_table = tegra_display_hub_of_match,
	},
	.probe = tegra_display_hub_probe,
	.remove = tegra_display_hub_remove,
};