1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
4 */
5
6#include <linux/clk.h>
7#include <linux/delay.h>
8#include <linux/dma-mapping.h>
9#include <linux/host1x.h>
10#include <linux/module.h>
11#include <linux/of.h>
12#include <linux/of_graph.h>
13#include <linux/of_platform.h>
14#include <linux/platform_device.h>
15#include <linux/pm_runtime.h>
16#include <linux/reset.h>
17
18#include <drm/drm_atomic.h>
19#include <drm/drm_atomic_helper.h>
20#include <drm/drm_blend.h>
21#include <drm/drm_fourcc.h>
22#include <drm/drm_framebuffer.h>
23#include <drm/drm_probe_helper.h>
24
25#include "drm.h"
26#include "dc.h"
27#include "plane.h"
28
29#define NFB 24
30
/*
 * Pixel formats supported by the shared (window group) planes. This table
 * is advertised to userspace when the plane is created; unsupported entries
 * are filtered per-SoC elsewhere.
 */
static const u32 tegra_shared_plane_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	/* new on Tegra114 */
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_BGRA5551,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGBX5551,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_BGRX5551,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	/* planar formats */
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YUV422,
};
54
/*
 * Framebuffer modifiers supported by the shared planes: linear plus the
 * NVIDIA 16Bx2 block-linear layouts, each optionally combined with the GPU
 * sector layout. The list is terminated by DRM_FORMAT_MOD_INVALID.
 */
static const u64 tegra_shared_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	/*
	 * The GPU sector layout is only supported on Tegra194, but these will
	 * be filtered out later on by ->format_mod_supported() on SoCs where
	 * it isn't supported.
	 */
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	/* sentinel */
	DRM_FORMAT_MOD_INVALID
};
77
78static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
79 unsigned int offset)
80{
81 if (offset >= 0x500 && offset <= 0x581) {
82 offset = 0x000 + (offset - 0x500);
83 return plane->offset + offset;
84 }
85
86 if (offset >= 0x700 && offset <= 0x73c) {
87 offset = 0x180 + (offset - 0x700);
88 return plane->offset + offset;
89 }
90
91 if (offset >= 0x800 && offset <= 0x83e) {
92 offset = 0x1c0 + (offset - 0x800);
93 return plane->offset + offset;
94 }
95
96 dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
97
98 return plane->offset + offset;
99}
100
/* Read a window register, remapping @offset into the plane's aperture. */
static inline u32 tegra_plane_readl(struct tegra_plane *plane,
				    unsigned int offset)
{
	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}
106
/* Write a window register, remapping @offset into the plane's aperture. */
static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
				      unsigned int offset)
{
	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}
112
113static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
114{
115 int err = 0;
116
117 mutex_lock(&wgrp->lock);
118
119 if (wgrp->usecount == 0) {
120 err = host1x_client_resume(wgrp->parent);
121 if (err < 0) {
122 dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
123 goto unlock;
124 }
125
126 reset_control_deassert(wgrp->rst);
127 }
128
129 wgrp->usecount++;
130
131unlock:
132 mutex_unlock(&wgrp->lock);
133 return err;
134}
135
136static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
137{
138 int err;
139
140 mutex_lock(&wgrp->lock);
141
142 if (wgrp->usecount == 1) {
143 err = reset_control_assert(wgrp->rst);
144 if (err < 0) {
145 pr_err("failed to assert reset for window group %u\n",
146 wgrp->index);
147 }
148
149 host1x_client_suspend(wgrp->parent);
150 }
151
152 wgrp->usecount--;
153 mutex_unlock(&wgrp->lock);
154}
155
156int tegra_display_hub_prepare(struct tegra_display_hub *hub)
157{
158 unsigned int i;
159
160 /*
161 * XXX Enabling/disabling windowgroups needs to happen when the owner
162 * display controller is disabled. There's currently no good point at
163 * which this could be executed, so unconditionally enable all window
164 * groups for now.
165 */
166 for (i = 0; i < hub->soc->num_wgrps; i++) {
167 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
168
169 /* Skip orphaned window group whose parent DC is disabled */
170 if (wgrp->parent)
171 tegra_windowgroup_enable(wgrp);
172 }
173
174 return 0;
175}
176
177void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
178{
179 unsigned int i;
180
181 /*
182 * XXX Remove this once window groups can be more fine-grainedly
183 * enabled and disabled.
184 */
185 for (i = 0; i < hub->soc->num_wgrps; i++) {
186 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
187
188 /* Skip orphaned window group whose parent DC is disabled */
189 if (wgrp->parent)
190 tegra_windowgroup_disable(wgrp);
191 }
192}
193
194static void tegra_shared_plane_update(struct tegra_plane *plane)
195{
196 struct tegra_dc *dc = plane->dc;
197 unsigned long timeout;
198 u32 mask, value;
199
200 mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
201 tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
202
203 timeout = jiffies + msecs_to_jiffies(1000);
204
205 while (time_before(jiffies, timeout)) {
206 value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
207 if ((value & mask) == 0)
208 break;
209
210 usleep_range(100, 400);
211 }
212}
213
214static void tegra_shared_plane_activate(struct tegra_plane *plane)
215{
216 struct tegra_dc *dc = plane->dc;
217 unsigned long timeout;
218 u32 mask, value;
219
220 mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
221 tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
222
223 timeout = jiffies + msecs_to_jiffies(1000);
224
225 while (time_before(jiffies, timeout)) {
226 value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
227 if ((value & mask) == 0)
228 break;
229
230 usleep_range(100, 400);
231 }
232}
233
234static unsigned int
235tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
236{
237 unsigned int offset =
238 tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
239
240 return tegra_dc_readl(dc, offset) & OWNER_MASK;
241}
242
243static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
244 struct tegra_plane *plane)
245{
246 struct device *dev = dc->dev;
247
248 if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
249 if (plane->dc == dc)
250 return true;
251
252 dev_WARN(dev, "head %u owns window %u but is not attached\n",
253 dc->pipe, plane->index);
254 }
255
256 return false;
257}
258
/*
 * Reassign window @plane to head @new, or release ownership when @new is
 * NULL. Updates both the hardware ownership register and plane->dc.
 *
 * Returns 0 on success or -EBUSY if the window is currently owned by a
 * different head.
 */
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
					struct tegra_dc *new)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
	/* use the old owner's register aperture when detaching (new == NULL) */
	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
	struct device *dev = new ? new->dev : old->dev;
	unsigned int owner, index = plane->index;
	u32 value;

	value = tegra_dc_readl(dc, offset);
	owner = value & OWNER_MASK;

	/* OWNER_MASK in the register means the window is currently unowned */
	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
		return -EBUSY;
	}

	/*
	 * This seems to happen whenever the head has been disabled with one
	 * or more windows being active. This is harmless because we'll just
	 * reassign the window to the new head anyway.
	 */
	if (old && owner == OWNER_MASK)
		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
			old->pipe, owner);

	value &= ~OWNER_MASK;

	if (new)
		value |= OWNER(new->pipe);
	else
		value |= OWNER_MASK;

	tegra_dc_writel(dc, value, offset);

	plane->dc = new;

	return 0;
}
299
/*
 * Program the input scaler coefficient RAM for @plane. The table holds
 * three sets (one per scaling ratio) of 16 phase rows with 4 words each;
 * each word is written via the indexed SET_INPUT_SCALER_COEFF register.
 */
static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
{
	static const unsigned int coeffs[192] = {
		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
	};
	unsigned int ratio, row, column;

	for (ratio = 0; ratio <= 2; ratio++) {
		for (row = 0; row <= 15; row++) {
			for (column = 0; column <= 3; column++) {
				/* index encodes ratio[7:6], row[5:2], column[1:0] */
				unsigned int index = (ratio << 6) + (row << 2) + column;
				u32 value;

				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
				tegra_plane_writel(plane, value,
						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
			}
		}
	}
}
367
/*
 * Attach @plane to head @dc and apply the default IHUB configuration:
 * linebuffer mode, fetch metering, latency control, pipe meter, mempool
 * allocation, thread grouping and scaler coefficients. Finishes with an
 * update/activate handshake so the settings take effect.
 *
 * Silently gives up if ownership cannot be transferred.
 */
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	u32 value;
	int err;

	if (!tegra_dc_owns_shared_plane(dc, plane)) {
		err = tegra_shared_plane_set_owner(plane, dc);
		if (err < 0)
			return;
	}

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
	value |= MODE_FOUR_LINES;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

	/* note: read-modify-write pattern, but the value is overwritten */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
	value = SLOTS(1);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

	/* disable watermark */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
	value &= ~LATENCY_CTL_MODE_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
	value |= WATERMARK_MASK;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

	/* pipe meter */
	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

	/* mempool entries */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
	value = MEMPOOL_ENTRIES(0x331);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

	/* one thread group per window, keyed by the plane index */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
	value &= ~THREAD_NUM_MASK;
	value |= THREAD_NUM(plane->base.index);
	value |= THREAD_GROUP_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

	tegra_shared_plane_setup_scaler(plane);

	tegra_shared_plane_update(plane);
	tegra_shared_plane_activate(plane);
}
418
/* Detach @plane from its head by releasing hardware ownership. */
static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	tegra_shared_plane_set_owner(plane, NULL);
}
424
/*
 * Validate the new plane state: resolve the pixel format, fetch and check
 * framebuffer tiling against SoC capabilities, reject unequal U/V pitches
 * and record the state with the atomic core.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
	struct tegra_bo_tiling *tiling = &plane_state->tiling;
	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
	int err;

	/* no need for further checks if the plane is being disabled */
	if (!new_plane_state->crtc || !new_plane_state->fb)
		return 0;

	err = tegra_plane_format(new_plane_state->fb->format->format,
				 &plane_state->format,
				 &plane_state->swap);
	if (err < 0)
		return err;

	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
	if (err < 0)
		return err;

	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
	    !dc->soc->supports_block_linear) {
		DRM_ERROR("hardware doesn't support block linear mode\n");
		return -EINVAL;
	}

	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
	    !dc->soc->supports_sector_layout) {
		DRM_ERROR("hardware doesn't support GPU sector layout\n");
		return -EINVAL;
	}

	/*
	 * Tegra doesn't support different strides for U and V planes so we
	 * error out if the user tries to display a framebuffer with such a
	 * configuration.
	 */
	if (new_plane_state->fb->format->num_planes > 2) {
		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
			DRM_ERROR("unsupported UV-plane configuration\n");
			return -EINVAL;
		}
	}

	/* XXX scaling is not yet supported, add a check here */

	err = tegra_plane_state_add(&tegra->base, new_plane_state);
	if (err < 0)
		return err;

	return 0;
}
482
/*
 * Disable the window: clear WIN_ENABLE in the window options register and
 * release hardware ownership of the plane. The display controller is
 * resumed for the register accesses and suspended again afterwards.
 */
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_dc *dc;
	u32 value;
	int err;

	/* rien ne va plus */
	if (!old_state || !old_state->crtc)
		return;

	dc = to_tegra_dc(old_state->crtc);

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/*
	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
	 * on planes that are already disabled. Make sure we fallback to the
	 * head for this particular state instead of crashing.
	 */
	if (WARN_ON(p->dc == NULL))
		p->dc = dc;

	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
	value &= ~WIN_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	tegra_dc_remove_shared_plane(dc, p);

	host1x_client_suspend(&dc->client);
}
521
522static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
523{
524 u64 tmp, tmp1, tmp2;
525
526 tmp = (u64)dfixed_trunc(in);
527 tmp2 = (u64)out;
528 tmp1 = (tmp << NFB) + (tmp2 >> 1);
529 do_div(tmp1, tmp2);
530
531 return lower_32_bits(tmp1);
532}
533
/*
 * Program all window registers for the new plane state and enable the
 * window: ownership/IHUB setup, blending, input scaler configuration,
 * framebuffer addresses (including optional U/V planes), cropping, surface
 * kind and degamma/colorspace parameters.
 *
 * Falls back to tegra_shared_plane_atomic_disable() when the plane is not
 * visible.
 */
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
					     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
	unsigned int zpos = new_state->normalized_zpos;
	struct drm_framebuffer *fb = new_state->fb;
	struct tegra_plane *p = to_tegra_plane(plane);
	u32 value, min_width, bypass = 0;
	dma_addr_t base, addr_flag = 0;
	unsigned int bpc, planes;
	bool yuv;
	int err;

	/* rien ne va plus */
	if (!new_state->crtc || !new_state->fb)
		return;

	if (!new_state->visible) {
		tegra_shared_plane_atomic_disable(plane, state);
		return;
	}

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);

	tegra_dc_assign_shared_plane(dc, p);

	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

	/* blending */
	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

	/* hardware layer depth is inverted relative to zpos */
	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

	/* scaling */
	min_width = min(new_state->src_w >> 16, new_state->crtc_w);

	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);

	if (min_width < MAX_PIXELS_5TAP444(value)) {
		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	} else {
		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);

		if (min_width < MAX_PIXELS_2TAP444(value))
			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
		else
			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
	}

	/*
	 * NOTE(review): this unconditionally overrides the tap selection
	 * computed above with 5-tap — presumably intentional, but worth
	 * confirming against the hardware documentation.
	 */
	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

	if (new_state->src_w != new_state->crtc_w << 16) {
		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_HBYPASS;
	}

	if (new_state->src_h != new_state->crtc_h << 16) {
		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_VBYPASS;
	}

	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

	/* disable compression */
	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/*
	 * Physical address bit 39 in Tegra194 is used as a switch for special
	 * logic that swizzles the memory using either the legacy Tegra or the
	 * dGPU sector layout.
	 */
	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
		addr_flag = BIT_ULL(39);
#endif

	base = tegra_plane_state->iova[0] + fb->offsets[0];
	base |= addr_flag;

	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

	value = V_POSITION(new_state->crtc_y) |
		H_POSITION(new_state->crtc_x);
	tegra_plane_writel(p, value, DC_WIN_POSITION);

	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_SIZE);

	value = WIN_ENABLE | COLOR_EXPAND;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

	value = PITCH(fb->pitches[0]);
	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

	if (yuv && planes > 1) {
		/* program U (and optionally V) plane addresses and pitches */
		base = tegra_plane_state->iova[1] + fb->offsets[1];
		base |= addr_flag;

		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);

		if (planes > 2) {
			base = tegra_plane_state->iova[2] + fb->offsets[2];
			base |= addr_flag;

			tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
			tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
		}

		value = PITCH_U(fb->pitches[1]);

		if (planes > 2)
			value |= PITCH_V(fb->pitches[2]);

		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
	} else {
		/* packed or single-plane format: clear the chroma registers */
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
	}

	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;

	if (yuv) {
		if (bpc < 12)
			value |= DEGAMMA_YUV8_10;
		else
			value |= DEGAMMA_YUV12;

		/* XXX parameterize */
		value |= COLOR_SPACE_YUV_2020;
	} else {
		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
			value |= DEGAMMA_SRGB;
	}

	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

	/*
	 * NOTE(review): OFFSET_X takes src_y and OFFSET_Y takes src_x —
	 * looks like the hardware's X/Y naming is swapped relative to DRM;
	 * confirm before "fixing".
	 */
	value = OFFSET_X(new_state->src_y >> 16) |
		OFFSET_Y(new_state->src_x >> 16);
	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

	if (dc->soc->supports_block_linear) {
		unsigned long height = tegra_plane_state->tiling.value;

		/* XXX */
		switch (tegra_plane_state->tiling.mode) {
		case TEGRA_BO_TILING_MODE_PITCH:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
				DC_WINBUF_SURFACE_KIND_PITCH;
			break;

		/* XXX not supported on Tegra186 and later */
		case TEGRA_BO_TILING_MODE_TILED:
			value = DC_WINBUF_SURFACE_KIND_TILED;
			break;

		case TEGRA_BO_TILING_MODE_BLOCK:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
				DC_WINBUF_SURFACE_KIND_BLOCK;
			break;
		}

		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
	}

	/* disable gamut CSC */
	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
	value &= ~CONTROL_CSC_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

	host1x_client_suspend(&dc->client);
}
747
/* Atomic helper callbacks for shared (window group) planes. */
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
	.prepare_fb = tegra_plane_prepare_fb,
	.cleanup_fb = tegra_plane_cleanup_fb,
	.atomic_check = tegra_shared_plane_atomic_check,
	.atomic_update = tegra_shared_plane_atomic_update,
	.atomic_disable = tegra_shared_plane_atomic_disable,
};
755
/*
 * Create and register a shared overlay plane backed by window group @wgrp
 * with window index @index. The plane can be assigned to any CRTC.
 *
 * Returns the new plane or an ERR_PTR() on failure.
 */
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
					    struct tegra_dc *dc,
					    unsigned int wgrp,
					    unsigned int index)
{
	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_shared_plane *plane;
	unsigned int possible_crtcs;
	unsigned int num_formats;
	const u64 *modifiers;
	struct drm_plane *p;
	const u32 *formats;
	int err;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	/* register aperture of window @index within the display controller */
	plane->base.offset = 0x0a00 + 0x0300 * index;
	plane->base.index = index;

	plane->wgrp = &hub->wgrps[wgrp];
	plane->wgrp->parent = &dc->client;

	p = &plane->base.base;

	/* planes can be assigned to arbitrary CRTCs */
	possible_crtcs = BIT(tegra->num_crtcs) - 1;

	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
	formats = tegra_shared_plane_formats;
	modifiers = tegra_shared_plane_modifiers;

	err = drm_universal_plane_init(drm, p, possible_crtcs,
				       &tegra_plane_funcs, formats,
				       num_formats, modifiers, type, NULL);
	if (err < 0) {
		kfree(plane);
		return ERR_PTR(err);
	}

	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
	drm_plane_create_zpos_property(p, 0, 0, 255);

	return p;
}
804
805static struct drm_private_state *
806tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
807{
808 struct tegra_display_hub_state *state;
809
810 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
811 if (!state)
812 return NULL;
813
814 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
815
816 return &state->base;
817}
818
/* Free a hub private state previously created by ->atomic_duplicate_state(). */
static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
					    struct drm_private_state *state)
{
	kfree(to_tegra_display_hub_state(state));
}
827
/* Private-object state management hooks for the display hub. */
static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
	.atomic_destroy_state = tegra_display_hub_destroy_state,
};
832
833static struct tegra_display_hub_state *
834tegra_display_hub_get_state(struct tegra_display_hub *hub,
835 struct drm_atomic_state *state)
836{
837 struct drm_private_state *priv;
838
839 priv = drm_atomic_get_private_obj_state(state, &hub->base);
840 if (IS_ERR(priv))
841 return ERR_CAST(priv);
842
843 return to_tegra_display_hub_state(priv);
844}
845
/*
 * During atomic check, record in the hub state which active CRTC has the
 * highest pixel clock; its clock will feed the hub display clock at commit
 * time. A no-op on SoCs without a display hub.
 *
 * Returns 0 on success or a negative error code.
 */
int tegra_display_hub_atomic_check(struct drm_device *drm,
				   struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *hub_state;
	struct drm_crtc_state *old, *new;
	struct drm_crtc *crtc;
	unsigned int i;

	if (!tegra->hub)
		return 0;

	hub_state = tegra_display_hub_get_state(tegra->hub, state);
	if (IS_ERR(hub_state))
		return PTR_ERR(hub_state);

	/*
	 * The display hub display clock needs to be fed by the display clock
	 * with the highest frequency to ensure proper functioning of all the
	 * displays.
	 *
	 * Note that this isn't used before Tegra186, but it doesn't hurt and
	 * conditionalizing it would make the code less clean.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
		struct tegra_dc_state *dc = to_dc_state(new);

		if (new->active) {
			/* track the fastest active head seen so far */
			if (!hub_state->clk || dc->pclk > hub_state->rate) {
				hub_state->dc = to_tegra_dc(dc->base.crtc);
				hub_state->clk = hub_state->dc->clk;
				hub_state->rate = dc->pclk;
			}
		}
	}

	return 0;
}
884
/*
 * Apply common IHUB settings through head @dc and kick a COMMON
 * update/activate cycle. The reads after each state-control write flush
 * the preceding write before proceeding.
 */
static void tegra_display_hub_update(struct tegra_dc *dc)
{
	u32 value;
	int err;

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
	value &= ~LATENCY_EVENT;
	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

	/* note: the read result is discarded; the value is overwritten */
	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	host1x_client_suspend(&dc->client);
}
911
/*
 * Commit the hub state computed in tegra_display_hub_atomic_check(): set
 * the chosen display clock's rate, reparent the hub display clock to it
 * and push the common IHUB configuration through the owning head. Clock
 * failures are logged but not fatal.
 */
void tegra_display_hub_atomic_commit(struct drm_device *drm,
				     struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_display_hub_state *hub_state;
	struct device *dev = hub->client.dev;
	int err;

	hub_state = to_tegra_display_hub_state(hub->base.state);

	if (hub_state->clk) {
		err = clk_set_rate(hub_state->clk, hub_state->rate);
		if (err < 0)
			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
				hub_state->clk, hub_state->rate);

		err = clk_set_parent(hub->clk_disp, hub_state->clk);
		if (err < 0)
			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
				hub->clk_disp, hub_state->clk, err);
	}

	if (hub_state->dc)
		tegra_display_hub_update(hub_state->dc);
}
938
939static int tegra_display_hub_init(struct host1x_client *client)
940{
941 struct tegra_display_hub *hub = to_tegra_display_hub(client);
942 struct drm_device *drm = dev_get_drvdata(client->host);
943 struct tegra_drm *tegra = drm->dev_private;
944 struct tegra_display_hub_state *state;
945
946 state = kzalloc(sizeof(*state), GFP_KERNEL);
947 if (!state)
948 return -ENOMEM;
949
950 drm_atomic_private_obj_init(drm, &hub->base, &state->base,
951 &tegra_display_hub_state_funcs);
952
953 tegra->hub = hub;
954
955 return 0;
956}
957
/*
 * host1x client ->exit: tear down the hub's private object and drop the
 * reference published by tegra_display_hub_init(). Always returns 0.
 */
static int tegra_display_hub_exit(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_private_obj_fini(&tegra->hub->base);
	tegra->hub = NULL;

	return 0;
}
968
/*
 * Runtime suspend: assert the hub reset, then disable head clocks (in
 * reverse order), the hub/DSC/display clocks, and finally drop the runtime
 * PM reference. Returns 0 or the reset error.
 */
static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i = hub->num_heads;
	int err;

	err = reset_control_assert(hub->rst);
	if (err < 0)
		return err;

	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
	clk_disable_unprepare(hub->clk_dsc);
	clk_disable_unprepare(hub->clk_disp);

	pm_runtime_put_sync(dev);

	return 0;
}
991
/*
 * Runtime resume: take a runtime PM reference, enable the display, DSC,
 * hub and per-head clocks in order, then deassert the hub reset. On any
 * failure, everything already enabled is unwound via the goto chain.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_display_hub_runtime_resume(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i;
	int err;

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(hub->clk_disp);
	if (err < 0)
		goto put_rpm;

	err = clk_prepare_enable(hub->clk_dsc);
	if (err < 0)
		goto disable_disp;

	err = clk_prepare_enable(hub->clk_hub);
	if (err < 0)
		goto disable_dsc;

	for (i = 0; i < hub->num_heads; i++) {
		err = clk_prepare_enable(hub->clk_heads[i]);
		if (err < 0)
			goto disable_heads;
	}

	err = reset_control_deassert(hub->rst);
	if (err < 0)
		goto disable_heads;

	return 0;

disable_heads:
	/* i is the number of head clocks successfully enabled */
	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
disable_dsc:
	clk_disable_unprepare(hub->clk_dsc);
disable_disp:
	clk_disable_unprepare(hub->clk_disp);
put_rpm:
	pm_runtime_put_sync(dev);
	return err;
}
1042
/* host1x client callbacks for the display hub. */
static const struct host1x_client_ops tegra_display_hub_ops = {
	.init = tegra_display_hub_init,
	.exit = tegra_display_hub_exit,
	.suspend = tegra_display_hub_runtime_suspend,
	.resume = tegra_display_hub_runtime_resume,
};
1049
1050static int tegra_display_hub_probe(struct platform_device *pdev)
1051{
1052 u64 dma_mask = dma_get_mask(pdev->dev.parent);
1053 struct device_node *child = NULL;
1054 struct tegra_display_hub *hub;
1055 struct clk *clk;
1056 unsigned int i;
1057 int err;
1058
1059 err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1060 if (err < 0) {
1061 dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1062 return err;
1063 }
1064
1065 hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1066 if (!hub)
1067 return -ENOMEM;
1068
1069 hub->soc = of_device_get_match_data(&pdev->dev);
1070
1071 hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1072 if (IS_ERR(hub->clk_disp)) {
1073 err = PTR_ERR(hub->clk_disp);
1074 return err;
1075 }
1076
1077 if (hub->soc->supports_dsc) {
1078 hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1079 if (IS_ERR(hub->clk_dsc)) {
1080 err = PTR_ERR(hub->clk_dsc);
1081 return err;
1082 }
1083 }
1084
1085 hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1086 if (IS_ERR(hub->clk_hub)) {
1087 err = PTR_ERR(hub->clk_hub);
1088 return err;
1089 }
1090
1091 hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1092 if (IS_ERR(hub->rst)) {
1093 err = PTR_ERR(hub->rst);
1094 return err;
1095 }
1096
1097 hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1098 sizeof(*hub->wgrps), GFP_KERNEL);
1099 if (!hub->wgrps)
1100 return -ENOMEM;
1101
1102 for (i = 0; i < hub->soc->num_wgrps; i++) {
1103 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1104 char id[16];
1105
1106 snprintf(id, sizeof(id), "wgrp%u", i);
1107 mutex_init(&wgrp->lock);
1108 wgrp->usecount = 0;
1109 wgrp->index = i;
1110
1111 wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1112 if (IS_ERR(wgrp->rst))
1113 return PTR_ERR(wgrp->rst);
1114
1115 err = reset_control_assert(wgrp->rst);
1116 if (err < 0)
1117 return err;
1118 }
1119
1120 hub->num_heads = of_get_child_count(pdev->dev.of_node);
1121
1122 hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1123 GFP_KERNEL);
1124 if (!hub->clk_heads)
1125 return -ENOMEM;
1126
1127 for (i = 0; i < hub->num_heads; i++) {
1128 child = of_get_next_child(pdev->dev.of_node, child);
1129 if (!child) {
1130 dev_err(&pdev->dev, "failed to find node for head %u\n",
1131 i);
1132 return -ENODEV;
1133 }
1134
1135 clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1136 if (IS_ERR(clk)) {
1137 dev_err(&pdev->dev, "failed to get clock for head %u\n",
1138 i);
1139 of_node_put(child);
1140 return PTR_ERR(clk);
1141 }
1142
1143 hub->clk_heads[i] = clk;
1144 }
1145
1146 of_node_put(child);
1147
1148 /* XXX: enable clock across reset? */
1149 err = reset_control_assert(hub->rst);
1150 if (err < 0)
1151 return err;
1152
1153 platform_set_drvdata(pdev, hub);
1154 pm_runtime_enable(&pdev->dev);
1155
1156 INIT_LIST_HEAD(&hub->client.list);
1157 hub->client.ops = &tegra_display_hub_ops;
1158 hub->client.dev = &pdev->dev;
1159
1160 err = host1x_client_register(&hub->client);
1161 if (err < 0)
1162 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1163 err);
1164
1165 err = devm_of_platform_populate(&pdev->dev);
1166 if (err < 0)
1167 goto unregister;
1168
1169 return err;
1170
1171unregister:
1172 host1x_client_unregister(&hub->client);
1173 pm_runtime_disable(&pdev->dev);
1174 return err;
1175}
1176
1177static void tegra_display_hub_remove(struct platform_device *pdev)
1178{
1179 struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1180 unsigned int i;
1181
1182 host1x_client_unregister(&hub->client);
1183
1184 for (i = 0; i < hub->soc->num_wgrps; i++) {
1185 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1186
1187 mutex_destroy(&wgrp->lock);
1188 }
1189
1190 pm_runtime_disable(&pdev->dev);
1191}
1192
/* Tegra186: six window groups, display stream compression clock present */
static const struct tegra_display_hub_soc tegra186_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = true,
};

/* Tegra194: six window groups, no DSC clock */
static const struct tegra_display_hub_soc tegra194_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = false,
};

static const struct of_device_id tegra_display_hub_of_match[] = {
	{
		.compatible = "nvidia,tegra194-display",
		.data = &tegra194_display_hub
	}, {
		.compatible = "nvidia,tegra186-display",
		.data = &tegra186_display_hub
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);

struct platform_driver tegra_display_hub_driver = {
	.driver = {
		.name = "tegra-display-hub",
		.of_match_table = tegra_display_hub_of_match,
	},
	.probe = tegra_display_hub_probe,
	.remove_new = tegra_display_hub_remove,
};
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
4 */
5
6#include <linux/clk.h>
7#include <linux/delay.h>
8#include <linux/host1x.h>
9#include <linux/module.h>
10#include <linux/of.h>
11#include <linux/of_device.h>
12#include <linux/of_graph.h>
13#include <linux/platform_device.h>
14#include <linux/pm_runtime.h>
15#include <linux/reset.h>
16
17#include <drm/drm_atomic.h>
18#include <drm/drm_atomic_helper.h>
19#include <drm/drm_fourcc.h>
20#include <drm/drm_probe_helper.h>
21
22#include "drm.h"
23#include "dc.h"
24#include "plane.h"
25
/* pixel formats scanned out by the shared (window group) planes */
static const u32 tegra_shared_plane_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	/* new on Tegra114 */
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_BGRA5551,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGBX5551,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_BGRX5551,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	/* planar formats */
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YUV422,
};

/* framebuffer modifiers; DRM_FORMAT_MOD_INVALID terminates the list */
static const u64 tegra_shared_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	DRM_FORMAT_MOD_INVALID
};
60
61static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
62 unsigned int offset)
63{
64 if (offset >= 0x500 && offset <= 0x581) {
65 offset = 0x000 + (offset - 0x500);
66 return plane->offset + offset;
67 }
68
69 if (offset >= 0x700 && offset <= 0x73c) {
70 offset = 0x180 + (offset - 0x700);
71 return plane->offset + offset;
72 }
73
74 if (offset >= 0x800 && offset <= 0x83e) {
75 offset = 0x1c0 + (offset - 0x800);
76 return plane->offset + offset;
77 }
78
79 dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
80
81 return plane->offset + offset;
82}
83
/* Read a plane register, translating @offset into this plane's aperture. */
static inline u32 tegra_plane_readl(struct tegra_plane *plane,
				    unsigned int offset)
{
	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}

/* Write a plane register, translating @offset into this plane's aperture. */
static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
				      unsigned int offset)
{
	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}
95
/*
 * Take a reference on a window group. On the 0 -> 1 transition, resume the
 * owning display controller and release the window group reset. Balanced
 * by tegra_windowgroup_disable(). On error the use count is left unchanged.
 */
static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
{
	int err = 0;

	mutex_lock(&wgrp->lock);

	if (wgrp->usecount == 0) {
		err = host1x_client_resume(wgrp->parent);
		if (err < 0) {
			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
			goto unlock;
		}

		reset_control_deassert(wgrp->rst);
	}

	wgrp->usecount++;

unlock:
	mutex_unlock(&wgrp->lock);
	return err;
}
118
/*
 * Drop a reference taken by tegra_windowgroup_enable(). On the 1 -> 0
 * transition, put the window group back into reset and suspend the owning
 * display controller. A failed reset assert is logged but not fatal.
 */
static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
{
	int err;

	mutex_lock(&wgrp->lock);

	if (wgrp->usecount == 1) {
		err = reset_control_assert(wgrp->rst);
		if (err < 0) {
			pr_err("failed to assert reset for window group %u\n",
			       wgrp->index);
		}

		host1x_client_suspend(wgrp->parent);
	}

	wgrp->usecount--;
	mutex_unlock(&wgrp->lock);
}
138
139int tegra_display_hub_prepare(struct tegra_display_hub *hub)
140{
141 unsigned int i;
142
143 /*
144 * XXX Enabling/disabling windowgroups needs to happen when the owner
145 * display controller is disabled. There's currently no good point at
146 * which this could be executed, so unconditionally enable all window
147 * groups for now.
148 */
149 for (i = 0; i < hub->soc->num_wgrps; i++) {
150 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
151
152 /* Skip orphaned window group whose parent DC is disabled */
153 if (wgrp->parent)
154 tegra_windowgroup_enable(wgrp);
155 }
156
157 return 0;
158}
159
160void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
161{
162 unsigned int i;
163
164 /*
165 * XXX Remove this once window groups can be more fine-grainedly
166 * enabled and disabled.
167 */
168 for (i = 0; i < hub->soc->num_wgrps; i++) {
169 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
170
171 /* Skip orphaned window group whose parent DC is disabled */
172 if (wgrp->parent)
173 tegra_windowgroup_disable(wgrp);
174 }
175}
176
/*
 * Request a common + per-window state update and poll for up to one second
 * until the hardware clears the request bits (they self-clear once the
 * update has been latched). A timeout is silently ignored.
 */
static void tegra_shared_plane_update(struct tegra_plane *plane)
{
	struct tegra_dc *dc = plane->dc;
	unsigned long timeout;
	u32 mask, value;

	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);

	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
		if ((value & mask) == 0)
			break;

		usleep_range(100, 400);
	}
}
196
/*
 * Request activation of the previously latched state and poll for up to
 * one second until the hardware clears the activation request bits. A
 * timeout is silently ignored.
 */
static void tegra_shared_plane_activate(struct tegra_plane *plane)
{
	struct tegra_dc *dc = plane->dc;
	unsigned long timeout;
	u32 mask, value;

	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);

	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
		if ((value & mask) == 0)
			break;

		usleep_range(100, 400);
	}
}
216
217static unsigned int
218tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
219{
220 unsigned int offset =
221 tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
222
223 return tegra_dc_readl(dc, offset) & OWNER_MASK;
224}
225
226static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
227 struct tegra_plane *plane)
228{
229 struct device *dev = dc->dev;
230
231 if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
232 if (plane->dc == dc)
233 return true;
234
235 dev_WARN(dev, "head %u owns window %u but is not attached\n",
236 dc->pipe, plane->index);
237 }
238
239 return false;
240}
241
/*
 * Reassign a shared window to display controller @new, or release it when
 * @new is NULL by programming the all-ones "unowned" value. Returns -EBUSY
 * if the window is currently owned by a different head.
 */
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
					struct tegra_dc *new)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
	struct device *dev = new ? new->dev : old->dev;
	unsigned int owner, index = plane->index;
	u32 value;

	value = tegra_dc_readl(dc, offset);
	owner = value & OWNER_MASK;

	/* only take over windows that are unowned or already ours */
	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
		return -EBUSY;
	}

	/*
	 * This seems to happen whenever the head has been disabled with one
	 * or more windows being active. This is harmless because we'll just
	 * reassign the window to the new head anyway.
	 */
	if (old && owner == OWNER_MASK)
		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
			old->pipe, owner);

	value &= ~OWNER_MASK;

	if (new)
		value |= OWNER(new->pipe);
	else
		value |= OWNER_MASK; /* all-ones marks the window unowned */

	tegra_dc_writel(dc, value, offset);

	plane->dc = new;

	return 0;
}
282
/*
 * Attach a shared window to @dc and program its window group's IHUB
 * parameters (line buffering, fetch metering, latency control, pipe meter,
 * mempool allocation and thread group), then latch and activate them.
 */
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	u32 value;
	int err;

	if (!tegra_dc_owns_shared_plane(dc, plane)) {
		err = tegra_shared_plane_set_owner(plane, dc);
		if (err < 0)
			return; /* ownership change failed; leave hardware untouched */
	}

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
	value |= MODE_FOUR_LINES;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

	/*
	 * NOTE(review): the values read here and below are overwritten
	 * before being written back -- presumably the reads are deliberate
	 * (read-to-latch); confirm against the TRM before removing them.
	 */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
	value = SLOTS(1);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

	/* disable watermark */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
	value &= ~LATENCY_CTL_MODE_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
	value |= WATERMARK_MASK;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

	/* pipe meter */
	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

	/* mempool entries */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
	value = MEMPOOL_ENTRIES(0x331);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
	value &= ~THREAD_NUM_MASK;
	value |= THREAD_NUM(plane->base.index);
	value |= THREAD_GROUP_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

	/* latch and activate the new window group configuration */
	tegra_shared_plane_update(plane);
	tegra_shared_plane_activate(plane);
}
331
/* Detach @plane from @dc by programming the "no owner" value. */
static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	tegra_shared_plane_set_owner(plane, NULL);
}
337
/*
 * Validate a proposed plane state: translate the framebuffer format, read
 * out tiling information and reject configurations the hardware cannot
 * scan out.
 */
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
					   struct drm_plane_state *state)
{
	struct tegra_plane_state *plane_state = to_tegra_plane_state(state);
	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
	struct tegra_bo_tiling *tiling = &plane_state->tiling;
	struct tegra_dc *dc = to_tegra_dc(state->crtc);
	int err;

	/* no need for further checks if the plane is being disabled */
	if (!state->crtc || !state->fb)
		return 0;

	err = tegra_plane_format(state->fb->format->format,
				 &plane_state->format,
				 &plane_state->swap);
	if (err < 0)
		return err;

	err = tegra_fb_get_tiling(state->fb, tiling);
	if (err < 0)
		return err;

	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
	    !dc->soc->supports_block_linear) {
		DRM_ERROR("hardware doesn't support block linear mode\n");
		return -EINVAL;
	}

	/*
	 * Tegra doesn't support different strides for U and V planes so we
	 * error out if the user tries to display a framebuffer with such a
	 * configuration.
	 */
	if (state->fb->format->num_planes > 2) {
		if (state->fb->pitches[2] != state->fb->pitches[1]) {
			DRM_ERROR("unsupported UV-plane configuration\n");
			return -EINVAL;
		}
	}

	/* XXX scaling is not yet supported, add a check here */

	err = tegra_plane_state_add(&tegra->base, state);
	if (err < 0)
		return err;

	return 0;
}
387
/*
 * Disable a shared plane: clear its window enable bit and return the
 * window to the unowned state. The CRTC is taken from @old_state because
 * the new state no longer references one.
 */
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
					      struct drm_plane_state *old_state)
{
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_dc *dc;
	u32 value;
	int err;

	/* rien ne va plus */
	if (!old_state || !old_state->crtc)
		return;

	dc = to_tegra_dc(old_state->crtc);

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/*
	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
	 * on planes that are already disabled. Make sure we fallback to the
	 * head for this particular state instead of crashing.
	 */
	if (WARN_ON(p->dc == NULL))
		p->dc = dc;

	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
	value &= ~WIN_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	tegra_dc_remove_shared_plane(dc, p);

	host1x_client_suspend(&dc->client);
}
424
/*
 * Program a shared plane for scanout: claim the window for the new CRTC,
 * then write the blending, scaling-bypass, format, geometry, framebuffer
 * address and surface-kind registers.
 */
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
					     struct drm_plane_state *old_state)
{
	struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
	struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
	unsigned int zpos = plane->state->normalized_zpos;
	struct drm_framebuffer *fb = plane->state->fb;
	struct tegra_plane *p = to_tegra_plane(plane);
	dma_addr_t base;
	u32 value;
	int err;

	/* rien ne va plus */
	if (!plane->state->crtc || !plane->state->fb)
		return;

	/* a non-visible plane is programmed as if it were being disabled */
	if (!plane->state->visible) {
		tegra_shared_plane_atomic_disable(plane, old_state);
		return;
	}

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	tegra_dc_assign_shared_plane(dc, p);

	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

	/* blending */
	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

	/* bypass scaling */
	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

	value = INPUT_SCALER_VBYPASS | INPUT_SCALER_HBYPASS;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

	/* disable compression */
	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

	base = state->iova[0] + fb->offsets[0];

	tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

	value = V_POSITION(plane->state->crtc_y) |
		H_POSITION(plane->state->crtc_x);
	tegra_plane_writel(p, value, DC_WIN_POSITION);

	value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_SIZE);

	value = WIN_ENABLE | COLOR_EXPAND;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

	value = PITCH(fb->pitches[0]);
	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

	value = CLAMP_BEFORE_BLEND | DEGAMMA_SRGB | INPUT_RANGE_FULL;
	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

	/*
	 * NOTE(review): OFFSET_X is fed src_y and OFFSET_Y is fed src_x --
	 * presumably the macro names are transposed relative to the register
	 * layout; confirm against the TRM before "fixing" this.
	 */
	value = OFFSET_X(plane->state->src_y >> 16) |
		OFFSET_Y(plane->state->src_x >> 16);
	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

	if (dc->soc->supports_block_linear) {
		unsigned long height = state->tiling.value;

		/* XXX */
		switch (state->tiling.mode) {
		case TEGRA_BO_TILING_MODE_PITCH:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
				DC_WINBUF_SURFACE_KIND_PITCH;
			break;

		/* XXX not supported on Tegra186 and later */
		case TEGRA_BO_TILING_MODE_TILED:
			value = DC_WINBUF_SURFACE_KIND_TILED;
			break;

		case TEGRA_BO_TILING_MODE_BLOCK:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
				DC_WINBUF_SURFACE_KIND_BLOCK;
			break;
		}

		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
	}

	/* disable gamut CSC */
	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
	value &= ~CONTROL_CSC_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

	host1x_client_suspend(&dc->client);
}
542
/* atomic helper callbacks for shared (window group) planes */
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
	.prepare_fb = tegra_plane_prepare_fb,
	.cleanup_fb = tegra_plane_cleanup_fb,
	.atomic_check = tegra_shared_plane_atomic_check,
	.atomic_update = tegra_shared_plane_atomic_update,
	.atomic_disable = tegra_shared_plane_atomic_disable,
};
550
/*
 * Create a shared overlay plane exposing hardware window @index, backed by
 * window group @wgrp and initially parented to @dc. Returns the DRM plane
 * or an ERR_PTR() on failure.
 */
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
					    struct tegra_dc *dc,
					    unsigned int wgrp,
					    unsigned int index)
{
	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	/* planes can be assigned to arbitrary CRTCs */
	unsigned int possible_crtcs = 0x7;
	struct tegra_shared_plane *plane;
	unsigned int num_formats;
	const u64 *modifiers;
	struct drm_plane *p;
	const u32 *formats;
	int err;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	/* window register apertures start at 0x0a00, spaced 0x300 apart */
	plane->base.offset = 0x0a00 + 0x0300 * index;
	plane->base.index = index;

	plane->wgrp = &hub->wgrps[wgrp];
	plane->wgrp->parent = &dc->client;

	p = &plane->base.base;

	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
	formats = tegra_shared_plane_formats;
	modifiers = tegra_shared_plane_modifiers;

	err = drm_universal_plane_init(drm, p, possible_crtcs,
				       &tegra_plane_funcs, formats,
				       num_formats, modifiers, type, NULL);
	if (err < 0) {
		kfree(plane);
		return ERR_PTR(err);
	}

	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
	drm_plane_create_zpos_property(p, 0, 0, 255);

	return p;
}
597
598static struct drm_private_state *
599tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
600{
601 struct tegra_display_hub_state *state;
602
603 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
604 if (!state)
605 return NULL;
606
607 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
608
609 return &state->base;
610}
611
/* Free a hub private state; it has no dynamically allocated members. */
static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
					    struct drm_private_state *state)
{
	kfree(to_tegra_display_hub_state(state));
}
620
/* duplicate/destroy hooks for the hub's atomic private object */
static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
	.atomic_destroy_state = tegra_display_hub_destroy_state,
};
625
626static struct tegra_display_hub_state *
627tegra_display_hub_get_state(struct tegra_display_hub *hub,
628 struct drm_atomic_state *state)
629{
630 struct drm_private_state *priv;
631
632 priv = drm_atomic_get_private_obj_state(state, &hub->base);
633 if (IS_ERR(priv))
634 return ERR_CAST(priv);
635
636 return to_tegra_display_hub_state(priv);
637}
638
/*
 * Record which active head runs at the highest pixel clock; its clock is
 * later made the parent of the hub display clock in
 * tegra_display_hub_atomic_commit().
 */
int tegra_display_hub_atomic_check(struct drm_device *drm,
				   struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *hub_state;
	struct drm_crtc_state *old, *new;
	struct drm_crtc *crtc;
	unsigned int i;

	if (!tegra->hub)
		return 0;

	hub_state = tegra_display_hub_get_state(tegra->hub, state);
	if (IS_ERR(hub_state))
		return PTR_ERR(hub_state);

	/*
	 * The display hub display clock needs to be fed by the display clock
	 * with the highest frequency to ensure proper functioning of all the
	 * displays.
	 *
	 * Note that this isn't used before Tegra186, but it doesn't hurt and
	 * conditionalizing it would make the code less clean.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
		struct tegra_dc_state *dc = to_dc_state(new);

		if (new->active) {
			if (!hub_state->clk || dc->pclk > hub_state->rate) {
				hub_state->dc = to_tegra_dc(dc->base.crtc);
				hub_state->clk = hub_state->dc->clk;
				hub_state->rate = dc->pclk;
			}
		}
	}

	return 0;
}
677
/*
 * Program the common IHUB parameters on @dc and latch them with a COMMON
 * update/activate cycle.
 */
static void tegra_display_hub_update(struct tegra_dc *dc)
{
	u32 value;
	int err;

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
	value &= ~LATENCY_EVENT;
	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

	/*
	 * NOTE(review): the readbacks after each state-control write are
	 * presumably there to post the preceding write -- confirm before
	 * removing.
	 */
	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	host1x_client_suspend(&dc->client);
}
704
/*
 * Apply the hub state computed in tegra_display_hub_atomic_check(): retune
 * the chosen head clock and reparent the hub display clock to it, then
 * program the common IHUB parameters. Clock errors are logged, not fatal.
 */
void tegra_display_hub_atomic_commit(struct drm_device *drm,
				     struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_display_hub_state *hub_state;
	struct device *dev = hub->client.dev;
	int err;

	hub_state = to_tegra_display_hub_state(hub->base.state);

	if (hub_state->clk) {
		err = clk_set_rate(hub_state->clk, hub_state->rate);
		if (err < 0)
			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
				hub_state->clk, hub_state->rate);

		err = clk_set_parent(hub->clk_disp, hub_state->clk);
		if (err < 0)
			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
				hub->clk_disp, hub_state->clk, err);
	}

	if (hub_state->dc)
		tegra_display_hub_update(hub_state->dc);
}
731
732static int tegra_display_hub_init(struct host1x_client *client)
733{
734 struct tegra_display_hub *hub = to_tegra_display_hub(client);
735 struct drm_device *drm = dev_get_drvdata(client->host);
736 struct tegra_drm *tegra = drm->dev_private;
737 struct tegra_display_hub_state *state;
738
739 state = kzalloc(sizeof(*state), GFP_KERNEL);
740 if (!state)
741 return -ENOMEM;
742
743 drm_atomic_private_obj_init(drm, &hub->base, &state->base,
744 &tegra_display_hub_state_funcs);
745
746 tegra->hub = hub;
747
748 return 0;
749}
750
/* host1x client exit: tear down the hub's atomic private object. */
static int tegra_display_hub_exit(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_private_obj_fini(&tegra->hub->base);
	tegra->hub = NULL;

	return 0;
}
761
/*
 * host1x client suspend: put the hub into reset, gate all of its clocks and
 * drop the runtime PM reference taken in ->resume().
 */
static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i = hub->num_heads;
	int err;

	/* assert reset first so no register traffic hits gated clocks */
	err = reset_control_assert(hub->rst);
	if (err < 0)
		return err;

	/* disable per-head clocks in reverse order of enabling */
	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
	clk_disable_unprepare(hub->clk_dsc);
	clk_disable_unprepare(hub->clk_disp);

	/* balances the runtime PM reference taken in ->resume() */
	pm_runtime_put_sync(dev);

	return 0;
}
784
785static int tegra_display_hub_runtime_resume(struct host1x_client *client)
786{
787 struct tegra_display_hub *hub = to_tegra_display_hub(client);
788 struct device *dev = client->dev;
789 unsigned int i;
790 int err;
791
792 err = pm_runtime_get_sync(dev);
793 if (err < 0) {
794 dev_err(dev, "failed to get runtime PM: %d\n", err);
795 return err;
796 }
797
798 err = clk_prepare_enable(hub->clk_disp);
799 if (err < 0)
800 goto put_rpm;
801
802 err = clk_prepare_enable(hub->clk_dsc);
803 if (err < 0)
804 goto disable_disp;
805
806 err = clk_prepare_enable(hub->clk_hub);
807 if (err < 0)
808 goto disable_dsc;
809
810 for (i = 0; i < hub->num_heads; i++) {
811 err = clk_prepare_enable(hub->clk_heads[i]);
812 if (err < 0)
813 goto disable_heads;
814 }
815
816 err = reset_control_deassert(hub->rst);
817 if (err < 0)
818 goto disable_heads;
819
820 return 0;
821
822disable_heads:
823 while (i--)
824 clk_disable_unprepare(hub->clk_heads[i]);
825
826 clk_disable_unprepare(hub->clk_hub);
827disable_dsc:
828 clk_disable_unprepare(hub->clk_dsc);
829disable_disp:
830 clk_disable_unprepare(hub->clk_disp);
831put_rpm:
832 pm_runtime_put_sync(dev);
833 return err;
834}
835
/* host1x client callbacks: lifecycle and runtime PM for the display hub */
static const struct host1x_client_ops tegra_display_hub_ops = {
	.init = tegra_display_hub_init,
	.exit = tegra_display_hub_exit,
	.suspend = tegra_display_hub_runtime_suspend,
	.resume = tegra_display_hub_runtime_resume,
};
842
843static int tegra_display_hub_probe(struct platform_device *pdev)
844{
845 struct device_node *child = NULL;
846 struct tegra_display_hub *hub;
847 struct clk *clk;
848 unsigned int i;
849 int err;
850
851 hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
852 if (!hub)
853 return -ENOMEM;
854
855 hub->soc = of_device_get_match_data(&pdev->dev);
856
857 hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
858 if (IS_ERR(hub->clk_disp)) {
859 err = PTR_ERR(hub->clk_disp);
860 return err;
861 }
862
863 if (hub->soc->supports_dsc) {
864 hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
865 if (IS_ERR(hub->clk_dsc)) {
866 err = PTR_ERR(hub->clk_dsc);
867 return err;
868 }
869 }
870
871 hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
872 if (IS_ERR(hub->clk_hub)) {
873 err = PTR_ERR(hub->clk_hub);
874 return err;
875 }
876
877 hub->rst = devm_reset_control_get(&pdev->dev, "misc");
878 if (IS_ERR(hub->rst)) {
879 err = PTR_ERR(hub->rst);
880 return err;
881 }
882
883 hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
884 sizeof(*hub->wgrps), GFP_KERNEL);
885 if (!hub->wgrps)
886 return -ENOMEM;
887
888 for (i = 0; i < hub->soc->num_wgrps; i++) {
889 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
890 char id[8];
891
892 snprintf(id, sizeof(id), "wgrp%u", i);
893 mutex_init(&wgrp->lock);
894 wgrp->usecount = 0;
895 wgrp->index = i;
896
897 wgrp->rst = devm_reset_control_get(&pdev->dev, id);
898 if (IS_ERR(wgrp->rst))
899 return PTR_ERR(wgrp->rst);
900
901 err = reset_control_assert(wgrp->rst);
902 if (err < 0)
903 return err;
904 }
905
906 hub->num_heads = of_get_child_count(pdev->dev.of_node);
907
908 hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
909 GFP_KERNEL);
910 if (!hub->clk_heads)
911 return -ENOMEM;
912
913 for (i = 0; i < hub->num_heads; i++) {
914 child = of_get_next_child(pdev->dev.of_node, child);
915 if (!child) {
916 dev_err(&pdev->dev, "failed to find node for head %u\n",
917 i);
918 return -ENODEV;
919 }
920
921 clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
922 if (IS_ERR(clk)) {
923 dev_err(&pdev->dev, "failed to get clock for head %u\n",
924 i);
925 of_node_put(child);
926 return PTR_ERR(clk);
927 }
928
929 hub->clk_heads[i] = clk;
930 }
931
932 of_node_put(child);
933
934 /* XXX: enable clock across reset? */
935 err = reset_control_assert(hub->rst);
936 if (err < 0)
937 return err;
938
939 platform_set_drvdata(pdev, hub);
940 pm_runtime_enable(&pdev->dev);
941
942 INIT_LIST_HEAD(&hub->client.list);
943 hub->client.ops = &tegra_display_hub_ops;
944 hub->client.dev = &pdev->dev;
945
946 err = host1x_client_register(&hub->client);
947 if (err < 0)
948 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
949 err);
950
951 err = devm_of_platform_populate(&pdev->dev);
952 if (err < 0)
953 goto unregister;
954
955 return err;
956
957unregister:
958 host1x_client_unregister(&hub->client);
959 pm_runtime_disable(&pdev->dev);
960 return err;
961}
962
/*
 * Platform driver unbind: unregister the host1x client, release the
 * per-window-group locks and disable runtime PM. Returns the result of the
 * unregistration.
 */
static int tegra_display_hub_remove(struct platform_device *pdev)
{
	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
	unsigned int i;
	int err;

	err = host1x_client_unregister(&hub->client);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
	}

	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];

		mutex_destroy(&wgrp->lock);
	}

	pm_runtime_disable(&pdev->dev);

	return err;
}
985
/* Tegra186: six window groups, display stream compression clock present */
static const struct tegra_display_hub_soc tegra186_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = true,
};

/* Tegra194: six window groups, no DSC clock */
static const struct tegra_display_hub_soc tegra194_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = false,
};

static const struct of_device_id tegra_display_hub_of_match[] = {
	{
		.compatible = "nvidia,tegra194-display",
		.data = &tegra194_display_hub
	}, {
		.compatible = "nvidia,tegra186-display",
		.data = &tegra186_display_hub
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);

struct platform_driver tegra_display_hub_driver = {
	.driver = {
		.name = "tegra-display-hub",
		.of_match_table = tegra_display_hub_of_match,
	},
	.probe = tegra_display_hub_probe,
	.remove = tegra_display_hub_remove,
};