v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
   4 */
   5
   6#include <linux/clk.h>
   7#include <linux/delay.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/host1x.h>
  10#include <linux/module.h>
   11#include <linux/of.h>
   12#include <linux/of_graph.h>
  13#include <linux/of_platform.h>
  14#include <linux/platform_device.h>
  15#include <linux/pm_runtime.h>
  16#include <linux/reset.h>
  17
  18#include <drm/drm_atomic.h>
  19#include <drm/drm_atomic_helper.h>
  20#include <drm/drm_blend.h>
  21#include <drm/drm_fourcc.h>
  22#include <drm/drm_framebuffer.h>
  23#include <drm/drm_probe_helper.h>
  24
  25#include "drm.h"
  26#include "dc.h"
  27#include "plane.h"
  28
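/*
 * Number of fractional bits used for the fixed-point phase computations in
 * compute_phase_incr() and the input scaler start-phase setup.
 */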
  29#define NFB 24
  30
  31static const u32 tegra_shared_plane_formats[] = {
  32	DRM_FORMAT_ARGB1555,
  33	DRM_FORMAT_RGB565,
  34	DRM_FORMAT_RGBA5551,
  35	DRM_FORMAT_ARGB8888,
  36	DRM_FORMAT_ABGR8888,
  37	/* new on Tegra114 */
  38	DRM_FORMAT_ABGR4444,
  39	DRM_FORMAT_ABGR1555,
  40	DRM_FORMAT_BGRA5551,
  41	DRM_FORMAT_XRGB1555,
  42	DRM_FORMAT_RGBX5551,
  43	DRM_FORMAT_XBGR1555,
  44	DRM_FORMAT_BGRX5551,
  45	DRM_FORMAT_BGR565,
  46	DRM_FORMAT_XRGB8888,
  47	DRM_FORMAT_XBGR8888,
  48	/* planar formats */
  49	DRM_FORMAT_UYVY,
  50	DRM_FORMAT_YUYV,
  51	DRM_FORMAT_YUV420,
  52	DRM_FORMAT_YUV422,
  53};
  54
  55static const u64 tegra_shared_plane_modifiers[] = {
  56	DRM_FORMAT_MOD_LINEAR,
  57	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
  58	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
  59	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
  60	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
  61	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
  62	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
  63	/*
  64	 * The GPU sector layout is only supported on Tegra194, but these will
  65	 * be filtered out later on by ->format_mod_supported() on SoCs where
  66	 * it isn't supported.
  67	 */
  68	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  69	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  70	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  71	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  72	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  73	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  74	/* sentinel */
  75	DRM_FORMAT_MOD_INVALID
  76};
  77
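/*
 * Translate a DC window register offset (0x500..0x581, 0x700..0x73c or
 * 0x800..0x83e) into an offset within this plane's register aperture.
 */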
  78static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
  79					      unsigned int offset)
  80{
  81	if (offset >= 0x500 && offset <= 0x581) {
  82		offset = 0x000 + (offset - 0x500);
  83		return plane->offset + offset;
  84	}
  85
  86	if (offset >= 0x700 && offset <= 0x73c) {
  87		offset = 0x180 + (offset - 0x700);
  88		return plane->offset + offset;
  89	}
  90
  91	if (offset >= 0x800 && offset <= 0x83e) {
  92		offset = 0x1c0 + (offset - 0x800);
  93		return plane->offset + offset;
  94	}
  95
  96	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
  97
  98	return plane->offset + offset;
  99}
 100
 101static inline u32 tegra_plane_readl(struct tegra_plane *plane,
 102				    unsigned int offset)
 103{
 104	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
 105}
 106
 107static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
 108				      unsigned int offset)
 109{
 110	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
 111}
 112
 113static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
 114{
 115	int err = 0;
 116
 117	mutex_lock(&wgrp->lock);
 118
 119	if (wgrp->usecount == 0) {
 120		err = host1x_client_resume(wgrp->parent);
 121		if (err < 0) {
 122			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
 123			goto unlock;
 124		}
 125
 126		reset_control_deassert(wgrp->rst);
 127	}
 128
 129	wgrp->usecount++;
 130
 131unlock:
 132	mutex_unlock(&wgrp->lock);
 133	return err;
 134}
 135
 136static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
 137{
 138	int err;
 139
 140	mutex_lock(&wgrp->lock);
 141
 142	if (wgrp->usecount == 1) {
 143		err = reset_control_assert(wgrp->rst);
 144		if (err < 0) {
 145			pr_err("failed to assert reset for window group %u\n",
 146			       wgrp->index);
 147		}
 148
 149		host1x_client_suspend(wgrp->parent);
 150	}
 151
 152	wgrp->usecount--;
 153	mutex_unlock(&wgrp->lock);
 154}
 155
 156int tegra_display_hub_prepare(struct tegra_display_hub *hub)
 157{
 158	unsigned int i;
 159
 160	/*
 161	 * XXX Enabling/disabling windowgroups needs to happen when the owner
 162	 * display controller is disabled. There's currently no good point at
 163	 * which this could be executed, so unconditionally enable all window
 164	 * groups for now.
 165	 */
 166	for (i = 0; i < hub->soc->num_wgrps; i++) {
 167		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 168
 169		/* Skip orphaned window group whose parent DC is disabled */
 170		if (wgrp->parent)
 171			tegra_windowgroup_enable(wgrp);
 172	}
 173
 174	return 0;
 175}
 176
 177void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
 178{
 179	unsigned int i;
 180
 181	/*
  182	 * XXX Remove this once window groups can be enabled and disabled at a
  183	 * finer granularity.
 184	 */
 185	for (i = 0; i < hub->soc->num_wgrps; i++) {
 186		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 187
 188		/* Skip orphaned window group whose parent DC is disabled */
 189		if (wgrp->parent)
 190			tegra_windowgroup_disable(wgrp);
 191	}
 192}
 193
 194static void tegra_shared_plane_update(struct tegra_plane *plane)
 195{
 196	struct tegra_dc *dc = plane->dc;
 197	unsigned long timeout;
 198	u32 mask, value;
 199
 200	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
 201	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
 202
 203	timeout = jiffies + msecs_to_jiffies(1000);
 204
 205	while (time_before(jiffies, timeout)) {
 206		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 207		if ((value & mask) == 0)
 208			break;
 209
 210		usleep_range(100, 400);
 211	}
 212}
 213
 214static void tegra_shared_plane_activate(struct tegra_plane *plane)
 215{
 216	struct tegra_dc *dc = plane->dc;
 217	unsigned long timeout;
 218	u32 mask, value;
 219
 220	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
 221	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
 222
 223	timeout = jiffies + msecs_to_jiffies(1000);
 224
 225	while (time_before(jiffies, timeout)) {
 226		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 227		if ((value & mask) == 0)
 228			break;
 229
 230		usleep_range(100, 400);
 231	}
 232}
 233
 234static unsigned int
 235tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
 236{
 237	unsigned int offset =
 238		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
 239
 240	return tegra_dc_readl(dc, offset) & OWNER_MASK;
 241}
 242
 243static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
 244				       struct tegra_plane *plane)
 245{
 246	struct device *dev = dc->dev;
 247
 248	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
 249		if (plane->dc == dc)
 250			return true;
 251
 252		dev_WARN(dev, "head %u owns window %u but is not attached\n",
 253			 dc->pipe, plane->index);
 254	}
 255
 256	return false;
 257}
 258
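/*
 * Assign the window to the head "new", or mark it as unowned (owner field set
 * to OWNER_MASK) when "new" is NULL.
 */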
 259static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
 260					struct tegra_dc *new)
 261{
 262	unsigned int offset =
 263		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
 264	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
 265	struct device *dev = new ? new->dev : old->dev;
 266	unsigned int owner, index = plane->index;
 267	u32 value;
 268
 269	value = tegra_dc_readl(dc, offset);
 270	owner = value & OWNER_MASK;
 271
 272	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
 273		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
 274		return -EBUSY;
 275	}
 276
 277	/*
 278	 * This seems to happen whenever the head has been disabled with one
 279	 * or more windows being active. This is harmless because we'll just
 280	 * reassign the window to the new head anyway.
 281	 */
 282	if (old && owner == OWNER_MASK)
 283		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
 284			old->pipe, owner);
 285
 286	value &= ~OWNER_MASK;
 287
 288	if (new)
 289		value |= OWNER(new->pipe);
 290	else
 291		value |= OWNER_MASK;
 292
 293	tegra_dc_writel(dc, value, offset);
 294
 295	plane->dc = new;
 296
 297	return 0;
 298}
 299
 300static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
 301{
 302	static const unsigned int coeffs[192] = {
 303		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
 304		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
 305		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
 306		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
 307		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
 308		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
 309		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
 310		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
 311		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
 312		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
 313		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
 314		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
 315		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
 316		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
 317		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
 318		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
 319		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
 320		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
 321		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
 322		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
 323		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
 324		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
 325		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
 326		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
 327		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
 328		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
 329		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
 330		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
 331		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
 332		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
 333		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
 334		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
 335		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
 336		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
 337		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
 338		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
 339		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
 340		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
 341		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
 342		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
 343		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
 344		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
 345		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
 346		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
 347		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
 348		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
 349		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
 350		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
 351	};
 352	unsigned int ratio, row, column;
 353
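	/*
	 * The table above holds three coefficient sets (indexed by "ratio"),
	 * each made up of 16 rows of 4 words; the register index written
	 * below is packed as (ratio << 6) + (row << 2) + column.
	 */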
 354	for (ratio = 0; ratio <= 2; ratio++) {
 355		for (row = 0; row <= 15; row++) {
 356			for (column = 0; column <= 3; column++) {
 357				unsigned int index = (ratio << 6) + (row << 2) + column;
 358				u32 value;
 359
 360				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
 361				tegra_plane_writel(plane, value,
 362						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
 363			}
 364		}
 365	}
 366}
 367
 368static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
 369					 struct tegra_plane *plane)
 370{
 371	u32 value;
 372	int err;
 373
 374	if (!tegra_dc_owns_shared_plane(dc, plane)) {
 375		err = tegra_shared_plane_set_owner(plane, dc);
 376		if (err < 0)
 377			return;
 378	}
 379
 380	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
 381	value |= MODE_FOUR_LINES;
 382	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
 383
 384	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
 385	value = SLOTS(1);
 386	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
 387
 388	/* disable watermark */
 389	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
 390	value &= ~LATENCY_CTL_MODE_ENABLE;
 391	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
 392
 393	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
 394	value |= WATERMARK_MASK;
 395	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
 396
 397	/* pipe meter */
 398	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
 399	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
 400	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
 401
 402	/* mempool entries */
 403	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
 404	value = MEMPOOL_ENTRIES(0x331);
 405	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
 406
 407	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
 408	value &= ~THREAD_NUM_MASK;
 409	value |= THREAD_NUM(plane->base.index);
 410	value |= THREAD_GROUP_ENABLE;
 411	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
 412
 413	tegra_shared_plane_setup_scaler(plane);
 414
 415	tegra_shared_plane_update(plane);
 416	tegra_shared_plane_activate(plane);
 417}
 418
 419static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
 420					 struct tegra_plane *plane)
 421{
 422	tegra_shared_plane_set_owner(plane, NULL);
 423}
 424
 425static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 426					   struct drm_atomic_state *state)
 427{
 428	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 429										 plane);
 430	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
 431	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
 432	struct tegra_bo_tiling *tiling = &plane_state->tiling;
 433	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
 434	int err;
 435
 436	/* no need for further checks if the plane is being disabled */
 437	if (!new_plane_state->crtc || !new_plane_state->fb)
 438		return 0;
 439
 440	err = tegra_plane_format(new_plane_state->fb->format->format,
 441				 &plane_state->format,
 442				 &plane_state->swap);
 443	if (err < 0)
 444		return err;
 445
 446	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
 447	if (err < 0)
 448		return err;
 449
 450	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
 451	    !dc->soc->supports_block_linear) {
 452		DRM_ERROR("hardware doesn't support block linear mode\n");
 453		return -EINVAL;
 454	}
 455
 456	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
 457	    !dc->soc->supports_sector_layout) {
 458		DRM_ERROR("hardware doesn't support GPU sector layout\n");
 459		return -EINVAL;
 460	}
 461
 462	/*
 463	 * Tegra doesn't support different strides for U and V planes so we
 464	 * error out if the user tries to display a framebuffer with such a
 465	 * configuration.
 466	 */
 467	if (new_plane_state->fb->format->num_planes > 2) {
 468		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
 469			DRM_ERROR("unsupported UV-plane configuration\n");
 470			return -EINVAL;
 471		}
 472	}
 473
 474	/* XXX scaling is not yet supported, add a check here */
 475
 476	err = tegra_plane_state_add(&tegra->base, new_plane_state);
 477	if (err < 0)
 478		return err;
 479
 480	return 0;
 481}
 482
 483static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 484					      struct drm_atomic_state *state)
 485{
 486	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 487									   plane);
 488	struct tegra_plane *p = to_tegra_plane(plane);
 489	struct tegra_dc *dc;
 490	u32 value;
 491	int err;
 492
  493	/* nothing to do if the plane is not attached to a CRTC */
 494	if (!old_state || !old_state->crtc)
 495		return;
 496
 497	dc = to_tegra_dc(old_state->crtc);
 498
 499	err = host1x_client_resume(&dc->client);
 500	if (err < 0) {
 501		dev_err(dc->dev, "failed to resume: %d\n", err);
 502		return;
 503	}
 504
 505	/*
 506	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
  507	 * on planes that are already disabled. Make sure we fall back to the
 508	 * head for this particular state instead of crashing.
 509	 */
 510	if (WARN_ON(p->dc == NULL))
 511		p->dc = dc;
 512
 513	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
 514	value &= ~WIN_ENABLE;
 515	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
 516
 517	tegra_dc_remove_shared_plane(dc, p);
 518
 519	host1x_client_suspend(&dc->client);
 520}
 521
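/*
 * Compute the scaler phase increment as a fixed-point value with NFB
 * fractional bits, rounded to the nearest step: (in << NFB + out / 2) / out.
 */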
 522static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
 523{
 524	u64 tmp, tmp1;
 525
 526	tmp = (u64)dfixed_trunc(in);
 527	tmp1 = (tmp << NFB) + ((u64)out >> 1);
  528	do_div(tmp1, out);
  529
  530	return lower_32_bits(tmp1);
 531}
 532
 533static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
 534					     struct drm_atomic_state *state)
 535{
 536	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 537									   plane);
 538	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
 539	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
 540	unsigned int zpos = new_state->normalized_zpos;
 541	struct drm_framebuffer *fb = new_state->fb;
 542	struct tegra_plane *p = to_tegra_plane(plane);
 543	u32 value, min_width, bypass = 0;
 544	dma_addr_t base, addr_flag = 0;
 545	unsigned int bpc, planes;
 546	bool yuv;
 547	int err;
 548
 549	/* rien ne va plus */
 550	if (!new_state->crtc || !new_state->fb)
 551		return;
 552
 553	if (!new_state->visible) {
 554		tegra_shared_plane_atomic_disable(plane, state);
 555		return;
 556	}
 557
 558	err = host1x_client_resume(&dc->client);
 559	if (err < 0) {
 560		dev_err(dc->dev, "failed to resume: %d\n", err);
 561		return;
 562	}
 563
 564	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);
 565
 566	tegra_dc_assign_shared_plane(dc, p);
 567
 568	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
 569
 570	/* blending */
 571	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
 572		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
 573		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
 574	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
 575
 576	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
 577		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
 578		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
 579	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
 580
 581	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
 582	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
 583
 584	/* scaling */
 585	min_width = min(new_state->src_w >> 16, new_state->crtc_w);
 586
 587	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);
 588
 589	if (min_width < MAX_PIXELS_5TAP444(value)) {
 590		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
 591	} else {
 592		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);
 593
 594		if (min_width < MAX_PIXELS_2TAP444(value))
 595			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
 596		else
 597			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
 598	}
 599
 600	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
 601	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
 602
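	/*
	 * For each direction that is actually scaled, program the phase
	 * increment and the start phase (0.5 plus half an increment, in NFB
	 * fractional bits); otherwise bypass the input scaler for that
	 * direction.
	 */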
 603	if (new_state->src_w != new_state->crtc_w << 16) {
 604		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
 605		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
 606		u32 init = (1 << (NFB - 1)) + (incr >> 1);
 607
 608		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
 609		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
 610	} else {
 611		bypass |= INPUT_SCALER_HBYPASS;
 612	}
 613
 614	if (new_state->src_h != new_state->crtc_h << 16) {
 615		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
 616		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
 617		u32 init = (1 << (NFB - 1)) + (incr >> 1);
 618
 619		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
 620		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
 621	} else {
 622		bypass |= INPUT_SCALER_VBYPASS;
 623	}
 624
 625	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
 626
 627	/* disable compression */
 628	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
 629
 630#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 631	/*
 632	 * Physical address bit 39 in Tegra194 is used as a switch for special
 633	 * logic that swizzles the memory using either the legacy Tegra or the
 634	 * dGPU sector layout.
 635	 */
 636	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
 637		addr_flag = BIT_ULL(39);
 638#endif
 639
 640	base = tegra_plane_state->iova[0] + fb->offsets[0];
 641	base |= addr_flag;
 642
 643	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
 644	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
 645
 646	value = V_POSITION(new_state->crtc_y) |
 647		H_POSITION(new_state->crtc_x);
 648	tegra_plane_writel(p, value, DC_WIN_POSITION);
 649
 650	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
 651	tegra_plane_writel(p, value, DC_WIN_SIZE);
 652
 653	value = WIN_ENABLE | COLOR_EXPAND;
 654	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
 655
 656	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
 657	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
 658
 659	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
 660	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
 661
 662	value = PITCH(fb->pitches[0]);
 663	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
 664
 665	if (yuv && planes > 1) {
 666		base = tegra_plane_state->iova[1] + fb->offsets[1];
 667		base |= addr_flag;
 668
 669		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
 670		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);
 671
 672		if (planes > 2) {
 673			base = tegra_plane_state->iova[2] + fb->offsets[2];
 674			base |= addr_flag;
 675
 676			tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
 677			tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
 678		}
 679
 680		value = PITCH_U(fb->pitches[1]);
 681
 682		if (planes > 2)
 683			value |= PITCH_V(fb->pitches[2]);
 684
 
 685		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
 686	} else {
 687		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
 688		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
 689		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
 690		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
 691		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
 692	}
 693
 694	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;
 695
 696	if (yuv) {
 697		if (bpc < 12)
 698			value |= DEGAMMA_YUV8_10;
 699		else
 700			value |= DEGAMMA_YUV12;
 701
 702		/* XXX parameterize */
 703		value |= COLOR_SPACE_YUV_2020;
 704	} else {
 705		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
 706			value |= DEGAMMA_SRGB;
 707	}
 708
 709	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
 710
 711	value = OFFSET_X(new_state->src_y >> 16) |
 712		OFFSET_Y(new_state->src_x >> 16);
 713	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
 714
 715	if (dc->soc->supports_block_linear) {
 716		unsigned long height = tegra_plane_state->tiling.value;
 717
 718		/* XXX */
 719		switch (tegra_plane_state->tiling.mode) {
 720		case TEGRA_BO_TILING_MODE_PITCH:
 721			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
 722				DC_WINBUF_SURFACE_KIND_PITCH;
 723			break;
 724
 725		/* XXX not supported on Tegra186 and later */
 726		case TEGRA_BO_TILING_MODE_TILED:
 727			value = DC_WINBUF_SURFACE_KIND_TILED;
 728			break;
 729
 730		case TEGRA_BO_TILING_MODE_BLOCK:
 731			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
 732				DC_WINBUF_SURFACE_KIND_BLOCK;
 733			break;
 734		}
 735
 736		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
 737	}
 738
 739	/* disable gamut CSC */
 740	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
 741	value &= ~CONTROL_CSC_ENABLE;
 742	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
 743
 744	host1x_client_suspend(&dc->client);
 745}
 746
 747static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
 748	.prepare_fb = tegra_plane_prepare_fb,
 749	.cleanup_fb = tegra_plane_cleanup_fb,
 750	.atomic_check = tegra_shared_plane_atomic_check,
 751	.atomic_update = tegra_shared_plane_atomic_update,
 752	.atomic_disable = tegra_shared_plane_atomic_disable,
 753};
 754
 755struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
 756					    struct tegra_dc *dc,
 757					    unsigned int wgrp,
 758					    unsigned int index)
 759{
 760	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
 761	struct tegra_drm *tegra = drm->dev_private;
 762	struct tegra_display_hub *hub = tegra->hub;
 763	struct tegra_shared_plane *plane;
 764	unsigned int possible_crtcs;
 765	unsigned int num_formats;
 766	const u64 *modifiers;
 767	struct drm_plane *p;
 768	const u32 *formats;
 769	int err;
 770
 771	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
 772	if (!plane)
 773		return ERR_PTR(-ENOMEM);
 774
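	/* shared window registers start at 0x0a00, with a 0x300 stride per window */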
 775	plane->base.offset = 0x0a00 + 0x0300 * index;
 776	plane->base.index = index;
 777
 778	plane->wgrp = &hub->wgrps[wgrp];
 779	plane->wgrp->parent = &dc->client;
 780
 781	p = &plane->base.base;
 782
 783	/* planes can be assigned to arbitrary CRTCs */
 784	possible_crtcs = BIT(tegra->num_crtcs) - 1;
 785
 786	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
 787	formats = tegra_shared_plane_formats;
 788	modifiers = tegra_shared_plane_modifiers;
 789
 790	err = drm_universal_plane_init(drm, p, possible_crtcs,
 791				       &tegra_plane_funcs, formats,
 792				       num_formats, modifiers, type, NULL);
 793	if (err < 0) {
 794		kfree(plane);
 795		return ERR_PTR(err);
 796	}
 797
 798	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
 799	drm_plane_create_zpos_property(p, 0, 0, 255);
 800
 801	return p;
 802}
 803
 804static struct drm_private_state *
 805tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
 806{
 807	struct tegra_display_hub_state *state;
 808
 809	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
 810	if (!state)
 811		return NULL;
 812
 813	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
 814
 815	return &state->base;
 816}
 817
 818static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
 819					    struct drm_private_state *state)
 820{
 821	struct tegra_display_hub_state *hub_state =
 822		to_tegra_display_hub_state(state);
 823
 824	kfree(hub_state);
 825}
 826
 827static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
 828	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
 829	.atomic_destroy_state = tegra_display_hub_destroy_state,
 830};
 831
 832static struct tegra_display_hub_state *
 833tegra_display_hub_get_state(struct tegra_display_hub *hub,
 834			    struct drm_atomic_state *state)
 835{
 836	struct drm_private_state *priv;
 837
 838	priv = drm_atomic_get_private_obj_state(state, &hub->base);
 839	if (IS_ERR(priv))
 840		return ERR_CAST(priv);
 841
 842	return to_tegra_display_hub_state(priv);
 843}
 844
 845int tegra_display_hub_atomic_check(struct drm_device *drm,
 846				   struct drm_atomic_state *state)
 847{
 848	struct tegra_drm *tegra = drm->dev_private;
 849	struct tegra_display_hub_state *hub_state;
 850	struct drm_crtc_state *old, *new;
 851	struct drm_crtc *crtc;
 852	unsigned int i;
 853
 854	if (!tegra->hub)
 855		return 0;
 856
 857	hub_state = tegra_display_hub_get_state(tegra->hub, state);
 858	if (IS_ERR(hub_state))
 859		return PTR_ERR(hub_state);
 860
 861	/*
 862	 * The display hub display clock needs to be fed by the display clock
 863	 * with the highest frequency to ensure proper functioning of all the
 864	 * displays.
 865	 *
 866	 * Note that this isn't used before Tegra186, but it doesn't hurt and
 867	 * conditionalizing it would make the code less clean.
 868	 */
 869	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
 870		struct tegra_dc_state *dc = to_dc_state(new);
 871
 872		if (new->active) {
 873			if (!hub_state->clk || dc->pclk > hub_state->rate) {
 874				hub_state->dc = to_tegra_dc(dc->base.crtc);
 875				hub_state->clk = hub_state->dc->clk;
 876				hub_state->rate = dc->pclk;
 877			}
 878		}
 879	}
 880
 881	return 0;
 882}
 883
 884static void tegra_display_hub_update(struct tegra_dc *dc)
 885{
 886	u32 value;
 887	int err;
 888
 889	err = host1x_client_resume(&dc->client);
 890	if (err < 0) {
 891		dev_err(dc->dev, "failed to resume: %d\n", err);
 892		return;
 893	}
 894
 895	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
 896	value &= ~LATENCY_EVENT;
 897	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
 898
 899	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
 900	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
 901	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
 902
 903	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
 904	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 905	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
 906	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 907
 908	host1x_client_suspend(&dc->client);
 909}
 910
 911void tegra_display_hub_atomic_commit(struct drm_device *drm,
 912				     struct drm_atomic_state *state)
 913{
 914	struct tegra_drm *tegra = drm->dev_private;
 915	struct tegra_display_hub *hub = tegra->hub;
 916	struct tegra_display_hub_state *hub_state;
 917	struct device *dev = hub->client.dev;
 918	int err;
 919
 920	hub_state = to_tegra_display_hub_state(hub->base.state);
 921
 922	if (hub_state->clk) {
 923		err = clk_set_rate(hub_state->clk, hub_state->rate);
 924		if (err < 0)
 925			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
 926				hub_state->clk, hub_state->rate);
 927
 928		err = clk_set_parent(hub->clk_disp, hub_state->clk);
 929		if (err < 0)
 930			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
 931				hub->clk_disp, hub_state->clk, err);
 932	}
 933
 934	if (hub_state->dc)
 935		tegra_display_hub_update(hub_state->dc);
 936}
 937
 938static int tegra_display_hub_init(struct host1x_client *client)
 939{
 940	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 941	struct drm_device *drm = dev_get_drvdata(client->host);
 942	struct tegra_drm *tegra = drm->dev_private;
 943	struct tegra_display_hub_state *state;
 944
 945	state = kzalloc(sizeof(*state), GFP_KERNEL);
 946	if (!state)
 947		return -ENOMEM;
 948
 949	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
 950				    &tegra_display_hub_state_funcs);
 951
 952	tegra->hub = hub;
 953
 954	return 0;
 955}
 956
 957static int tegra_display_hub_exit(struct host1x_client *client)
 958{
 959	struct drm_device *drm = dev_get_drvdata(client->host);
 960	struct tegra_drm *tegra = drm->dev_private;
 961
 962	drm_atomic_private_obj_fini(&tegra->hub->base);
 963	tegra->hub = NULL;
 964
 965	return 0;
 966}
 967
 968static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
 969{
 970	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 971	struct device *dev = client->dev;
 972	unsigned int i = hub->num_heads;
 973	int err;
 974
 975	err = reset_control_assert(hub->rst);
 976	if (err < 0)
 977		return err;
 978
 979	while (i--)
 980		clk_disable_unprepare(hub->clk_heads[i]);
 981
 982	clk_disable_unprepare(hub->clk_hub);
 983	clk_disable_unprepare(hub->clk_dsc);
 984	clk_disable_unprepare(hub->clk_disp);
 985
 986	pm_runtime_put_sync(dev);
 987
 988	return 0;
 989}
 990
 991static int tegra_display_hub_runtime_resume(struct host1x_client *client)
 992{
 993	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 994	struct device *dev = client->dev;
 995	unsigned int i;
 996	int err;
 997
 998	err = pm_runtime_resume_and_get(dev);
 999	if (err < 0) {
1000		dev_err(dev, "failed to get runtime PM: %d\n", err);
1001		return err;
1002	}
1003
1004	err = clk_prepare_enable(hub->clk_disp);
1005	if (err < 0)
1006		goto put_rpm;
1007
1008	err = clk_prepare_enable(hub->clk_dsc);
1009	if (err < 0)
1010		goto disable_disp;
1011
1012	err = clk_prepare_enable(hub->clk_hub);
1013	if (err < 0)
1014		goto disable_dsc;
1015
1016	for (i = 0; i < hub->num_heads; i++) {
1017		err = clk_prepare_enable(hub->clk_heads[i]);
1018		if (err < 0)
1019			goto disable_heads;
1020	}
1021
1022	err = reset_control_deassert(hub->rst);
1023	if (err < 0)
1024		goto disable_heads;
1025
1026	return 0;
1027
1028disable_heads:
1029	while (i--)
1030		clk_disable_unprepare(hub->clk_heads[i]);
1031
1032	clk_disable_unprepare(hub->clk_hub);
1033disable_dsc:
1034	clk_disable_unprepare(hub->clk_dsc);
1035disable_disp:
1036	clk_disable_unprepare(hub->clk_disp);
1037put_rpm:
1038	pm_runtime_put_sync(dev);
1039	return err;
1040}
1041
1042static const struct host1x_client_ops tegra_display_hub_ops = {
1043	.init = tegra_display_hub_init,
1044	.exit = tegra_display_hub_exit,
1045	.suspend = tegra_display_hub_runtime_suspend,
1046	.resume = tegra_display_hub_runtime_resume,
1047};
1048
1049static int tegra_display_hub_probe(struct platform_device *pdev)
1050{
1051	u64 dma_mask = dma_get_mask(pdev->dev.parent);
1052	struct device_node *child = NULL;
1053	struct tegra_display_hub *hub;
1054	struct clk *clk;
1055	unsigned int i;
1056	int err;
1057
1058	err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1059	if (err < 0) {
1060		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1061		return err;
1062	}
1063
1064	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1065	if (!hub)
1066		return -ENOMEM;
1067
1068	hub->soc = of_device_get_match_data(&pdev->dev);
1069
1070	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1071	if (IS_ERR(hub->clk_disp)) {
1072		err = PTR_ERR(hub->clk_disp);
1073		return err;
1074	}
1075
1076	if (hub->soc->supports_dsc) {
1077		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1078		if (IS_ERR(hub->clk_dsc)) {
1079			err = PTR_ERR(hub->clk_dsc);
1080			return err;
1081		}
1082	}
1083
1084	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1085	if (IS_ERR(hub->clk_hub)) {
1086		err = PTR_ERR(hub->clk_hub);
1087		return err;
1088	}
1089
1090	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1091	if (IS_ERR(hub->rst)) {
1092		err = PTR_ERR(hub->rst);
1093		return err;
1094	}
1095
1096	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1097				  sizeof(*hub->wgrps), GFP_KERNEL);
1098	if (!hub->wgrps)
1099		return -ENOMEM;
1100
1101	for (i = 0; i < hub->soc->num_wgrps; i++) {
1102		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1103		char id[16];
1104
1105		snprintf(id, sizeof(id), "wgrp%u", i);
1106		mutex_init(&wgrp->lock);
1107		wgrp->usecount = 0;
1108		wgrp->index = i;
1109
1110		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1111		if (IS_ERR(wgrp->rst))
1112			return PTR_ERR(wgrp->rst);
1113
1114		err = reset_control_assert(wgrp->rst);
1115		if (err < 0)
1116			return err;
1117	}
1118
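	/* one head per child node of the hub; each head provides a "dc" clock */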
1119	hub->num_heads = of_get_child_count(pdev->dev.of_node);
1120
1121	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1122				      GFP_KERNEL);
1123	if (!hub->clk_heads)
1124		return -ENOMEM;
1125
1126	for (i = 0; i < hub->num_heads; i++) {
1127		child = of_get_next_child(pdev->dev.of_node, child);
1128		if (!child) {
1129			dev_err(&pdev->dev, "failed to find node for head %u\n",
1130				i);
1131			return -ENODEV;
1132		}
1133
1134		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1135		if (IS_ERR(clk)) {
1136			dev_err(&pdev->dev, "failed to get clock for head %u\n",
1137				i);
1138			of_node_put(child);
1139			return PTR_ERR(clk);
1140		}
1141
1142		hub->clk_heads[i] = clk;
1143	}
1144
1145	of_node_put(child);
1146
1147	/* XXX: enable clock across reset? */
1148	err = reset_control_assert(hub->rst);
1149	if (err < 0)
1150		return err;
1151
1152	platform_set_drvdata(pdev, hub);
1153	pm_runtime_enable(&pdev->dev);
1154
1155	INIT_LIST_HEAD(&hub->client.list);
1156	hub->client.ops = &tegra_display_hub_ops;
1157	hub->client.dev = &pdev->dev;
1158
1159	err = host1x_client_register(&hub->client);
1160	if (err < 0)
1161		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1162			err);
1163
1164	err = devm_of_platform_populate(&pdev->dev);
1165	if (err < 0)
1166		goto unregister;
1167
1168	return err;
1169
1170unregister:
1171	host1x_client_unregister(&hub->client);
1172	pm_runtime_disable(&pdev->dev);
1173	return err;
1174}
1175
1176static void tegra_display_hub_remove(struct platform_device *pdev)
1177{
1178	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
 1179	unsigned int i;
 1180
 1181	host1x_client_unregister(&hub->client);
 1182
1183	for (i = 0; i < hub->soc->num_wgrps; i++) {
1184		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1185
1186		mutex_destroy(&wgrp->lock);
1187	}
1188
 1189	pm_runtime_disable(&pdev->dev);
 1190}
1191
1192static const struct tegra_display_hub_soc tegra186_display_hub = {
1193	.num_wgrps = 6,
1194	.supports_dsc = true,
1195};
1196
1197static const struct tegra_display_hub_soc tegra194_display_hub = {
1198	.num_wgrps = 6,
1199	.supports_dsc = false,
1200};
1201
1202static const struct of_device_id tegra_display_hub_of_match[] = {
1203	{
1204		.compatible = "nvidia,tegra194-display",
1205		.data = &tegra194_display_hub
1206	}, {
1207		.compatible = "nvidia,tegra186-display",
1208		.data = &tegra186_display_hub
1209	}, {
1210		/* sentinel */
1211	}
1212};
1213MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1214
1215struct platform_driver tegra_display_hub_driver = {
1216	.driver = {
1217		.name = "tegra-display-hub",
1218		.of_match_table = tegra_display_hub_of_match,
1219	},
1220	.probe = tegra_display_hub_probe,
1221	.remove = tegra_display_hub_remove,
1222};
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
   4 */
   5
   6#include <linux/clk.h>
   7#include <linux/delay.h>
 
   8#include <linux/host1x.h>
   9#include <linux/module.h>
  10#include <linux/of.h>
  11#include <linux/of_device.h>
  12#include <linux/of_graph.h>
 
  13#include <linux/platform_device.h>
  14#include <linux/pm_runtime.h>
  15#include <linux/reset.h>
  16
  17#include <drm/drm_atomic.h>
  18#include <drm/drm_atomic_helper.h>
 
  19#include <drm/drm_fourcc.h>
 
  20#include <drm/drm_probe_helper.h>
  21
  22#include "drm.h"
  23#include "dc.h"
  24#include "plane.h"
  25
  26#define NFB 24
  27
  28static const u32 tegra_shared_plane_formats[] = {
  29	DRM_FORMAT_ARGB1555,
  30	DRM_FORMAT_RGB565,
  31	DRM_FORMAT_RGBA5551,
  32	DRM_FORMAT_ARGB8888,
  33	DRM_FORMAT_ABGR8888,
  34	/* new on Tegra114 */
  35	DRM_FORMAT_ABGR4444,
  36	DRM_FORMAT_ABGR1555,
  37	DRM_FORMAT_BGRA5551,
  38	DRM_FORMAT_XRGB1555,
  39	DRM_FORMAT_RGBX5551,
  40	DRM_FORMAT_XBGR1555,
  41	DRM_FORMAT_BGRX5551,
  42	DRM_FORMAT_BGR565,
  43	DRM_FORMAT_XRGB8888,
  44	DRM_FORMAT_XBGR8888,
  45	/* planar formats */
  46	DRM_FORMAT_UYVY,
  47	DRM_FORMAT_YUYV,
  48	DRM_FORMAT_YUV420,
  49	DRM_FORMAT_YUV422,
  50};
  51
  52static const u64 tegra_shared_plane_modifiers[] = {
  53	DRM_FORMAT_MOD_LINEAR,
  54	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
  55	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
  56	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
  57	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
  58	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
  59	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
  60	/*
  61	 * The GPU sector layout is only supported on Tegra194, but these will
  62	 * be filtered out later on by ->format_mod_supported() on SoCs where
  63	 * it isn't supported.
  64	 */
  65	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  66	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  67	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  68	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  69	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  70	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  71	/* sentinel */
  72	DRM_FORMAT_MOD_INVALID
  73};
  74
  75static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
  76					      unsigned int offset)
  77{
  78	if (offset >= 0x500 && offset <= 0x581) {
  79		offset = 0x000 + (offset - 0x500);
  80		return plane->offset + offset;
  81	}
  82
  83	if (offset >= 0x700 && offset <= 0x73c) {
  84		offset = 0x180 + (offset - 0x700);
  85		return plane->offset + offset;
  86	}
  87
  88	if (offset >= 0x800 && offset <= 0x83e) {
  89		offset = 0x1c0 + (offset - 0x800);
  90		return plane->offset + offset;
  91	}
  92
  93	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
  94
  95	return plane->offset + offset;
  96}
  97
  98static inline u32 tegra_plane_readl(struct tegra_plane *plane,
  99				    unsigned int offset)
 100{
 101	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
 102}
 103
 104static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
 105				      unsigned int offset)
 106{
 107	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
 108}
 109
 110static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
 111{
 112	int err = 0;
 113
 114	mutex_lock(&wgrp->lock);
 115
 116	if (wgrp->usecount == 0) {
 117		err = host1x_client_resume(wgrp->parent);
 118		if (err < 0) {
 119			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
 120			goto unlock;
 121		}
 122
 123		reset_control_deassert(wgrp->rst);
 124	}
 125
 126	wgrp->usecount++;
 127
 128unlock:
 129	mutex_unlock(&wgrp->lock);
 130	return err;
 131}
 132
 133static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
 134{
 135	int err;
 136
 137	mutex_lock(&wgrp->lock);
 138
 139	if (wgrp->usecount == 1) {
 140		err = reset_control_assert(wgrp->rst);
 141		if (err < 0) {
 142			pr_err("failed to assert reset for window group %u\n",
 143			       wgrp->index);
 144		}
 145
 146		host1x_client_suspend(wgrp->parent);
 147	}
 148
 149	wgrp->usecount--;
 150	mutex_unlock(&wgrp->lock);
 151}
 152
 153int tegra_display_hub_prepare(struct tegra_display_hub *hub)
 154{
 155	unsigned int i;
 156
 157	/*
 158	 * XXX Enabling/disabling windowgroups needs to happen when the owner
 159	 * display controller is disabled. There's currently no good point at
 160	 * which this could be executed, so unconditionally enable all window
 161	 * groups for now.
 162	 */
 163	for (i = 0; i < hub->soc->num_wgrps; i++) {
 164		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 165
 166		/* Skip orphaned window group whose parent DC is disabled */
 167		if (wgrp->parent)
 168			tegra_windowgroup_enable(wgrp);
 169	}
 170
 171	return 0;
 172}
 173
 174void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
 175{
 176	unsigned int i;
 177
 178	/*
 179	 * XXX Remove this once window groups can be more fine-grainedly
 180	 * enabled and disabled.
 181	 */
 182	for (i = 0; i < hub->soc->num_wgrps; i++) {
 183		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 184
 185		/* Skip orphaned window group whose parent DC is disabled */
 186		if (wgrp->parent)
 187			tegra_windowgroup_disable(wgrp);
 188	}
 189}
 190
 191static void tegra_shared_plane_update(struct tegra_plane *plane)
 192{
 193	struct tegra_dc *dc = plane->dc;
 194	unsigned long timeout;
 195	u32 mask, value;
 196
 197	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
 198	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
 199
 200	timeout = jiffies + msecs_to_jiffies(1000);
 201
 202	while (time_before(jiffies, timeout)) {
 203		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 204		if ((value & mask) == 0)
 205			break;
 206
 207		usleep_range(100, 400);
 208	}
 209}
 210
 211static void tegra_shared_plane_activate(struct tegra_plane *plane)
 212{
 213	struct tegra_dc *dc = plane->dc;
 214	unsigned long timeout;
 215	u32 mask, value;
 216
 217	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
 218	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
 219
 220	timeout = jiffies + msecs_to_jiffies(1000);
 221
 222	while (time_before(jiffies, timeout)) {
 223		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 224		if ((value & mask) == 0)
 225			break;
 226
 227		usleep_range(100, 400);
 228	}
 229}
 230
 231static unsigned int
 232tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
 233{
 234	unsigned int offset =
 235		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
 236
 237	return tegra_dc_readl(dc, offset) & OWNER_MASK;
 238}
 239
 240static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
 241				       struct tegra_plane *plane)
 242{
 243	struct device *dev = dc->dev;
 244
 245	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
 246		if (plane->dc == dc)
 247			return true;
 248
 249		dev_WARN(dev, "head %u owns window %u but is not attached\n",
 250			 dc->pipe, plane->index);
 251	}
 252
 253	return false;
 254}
 255
 256static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
 257					struct tegra_dc *new)
 258{
 259	unsigned int offset =
 260		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
 261	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
 262	struct device *dev = new ? new->dev : old->dev;
 263	unsigned int owner, index = plane->index;
 264	u32 value;
 265
 266	value = tegra_dc_readl(dc, offset);
 267	owner = value & OWNER_MASK;
 268
 269	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
 270		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
 271		return -EBUSY;
 272	}
 273
 274	/*
 275	 * This seems to happen whenever the head has been disabled with one
 276	 * or more windows being active. This is harmless because we'll just
 277	 * reassign the window to the new head anyway.
 278	 */
 279	if (old && owner == OWNER_MASK)
 280		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
 281			old->pipe, owner);
 282
 283	value &= ~OWNER_MASK;
 284
 285	if (new)
 286		value |= OWNER(new->pipe);
 287	else
 288		value |= OWNER_MASK;
 289
 290	tegra_dc_writel(dc, value, offset);
 291
 292	plane->dc = new;
 293
 294	return 0;
 295}
 296
 297static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
 298{
 299	static const unsigned int coeffs[192] = {
 300		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
 301		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
 302		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
 303		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
 304		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
 305		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
 306		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
 307		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
 308		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
 309		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
 310		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
 311		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
 312		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
 313		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
 314		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
 315		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
 316		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
 317		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
 318		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
 319		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
 320		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
 321		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
 322		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
 323		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
 324		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
 325		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
 326		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
 327		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
 328		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
 329		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
 330		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
 331		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
 332		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
 333		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
 334		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
 335		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
 336		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
 337		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
 338		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
 339		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
 340		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
 341		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
 342		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
 343		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
 344		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
 345		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
 346		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
 347		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
 348	};
 349	unsigned int ratio, row, column;
 350
 351	for (ratio = 0; ratio <= 2; ratio++) {
 352		for (row = 0; row <= 15; row++) {
 353			for (column = 0; column <= 3; column++) {
 354				unsigned int index = (ratio << 6) + (row << 2) + column;
 355				u32 value;
 356
 357				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
 358				tegra_plane_writel(plane, value,
 359						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
 360			}
 361		}
 362	}
 363}
 364
 365static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
 366					 struct tegra_plane *plane)
 367{
 368	u32 value;
 369	int err;
 370
 371	if (!tegra_dc_owns_shared_plane(dc, plane)) {
 372		err = tegra_shared_plane_set_owner(plane, dc);
 373		if (err < 0)
 374			return;
 375	}
 376
 377	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
 378	value |= MODE_FOUR_LINES;
 379	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
 380
 381	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
 382	value = SLOTS(1);
 383	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
 384
 385	/* disable watermark */
 386	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
 387	value &= ~LATENCY_CTL_MODE_ENABLE;
 388	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
 389
 390	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
 391	value |= WATERMARK_MASK;
 392	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
 393
 394	/* pipe meter */
 395	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
 396	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
 397	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
 398
 399	/* mempool entries */
 400	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
 401	value = MEMPOOL_ENTRIES(0x331);
 402	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
 403
 404	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
 405	value &= ~THREAD_NUM_MASK;
 406	value |= THREAD_NUM(plane->base.index);
 407	value |= THREAD_GROUP_ENABLE;
 408	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
 409
 410	tegra_shared_plane_setup_scaler(plane);
 411
 412	tegra_shared_plane_update(plane);
 413	tegra_shared_plane_activate(plane);
 414}
 415
 416static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
 417					 struct tegra_plane *plane)
 418{
 419	tegra_shared_plane_set_owner(plane, NULL);
 420}
 421
 422static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 423					   struct drm_atomic_state *state)
 424{
 425	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 426										 plane);
 427	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
 428	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
 429	struct tegra_bo_tiling *tiling = &plane_state->tiling;
 430	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
 431	int err;
 432
 433	/* no need for further checks if the plane is being disabled */
 434	if (!new_plane_state->crtc || !new_plane_state->fb)
 435		return 0;
 436
 437	err = tegra_plane_format(new_plane_state->fb->format->format,
 438				 &plane_state->format,
 439				 &plane_state->swap);
 440	if (err < 0)
 441		return err;
 442
 443	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
 444	if (err < 0)
 445		return err;
 446
 447	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
 448	    !dc->soc->supports_block_linear) {
 449		DRM_ERROR("hardware doesn't support block linear mode\n");
 450		return -EINVAL;
 451	}
 452
 453	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
 454	    !dc->soc->supports_sector_layout) {
 455		DRM_ERROR("hardware doesn't support GPU sector layout\n");
 456		return -EINVAL;
 457	}
 458
 459	/*
 460	 * Tegra doesn't support different strides for U and V planes so we
 461	 * error out if the user tries to display a framebuffer with such a
 462	 * configuration.
 463	 */
 464	if (new_plane_state->fb->format->num_planes > 2) {
 465		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
 466			DRM_ERROR("unsupported UV-plane configuration\n");
 467			return -EINVAL;
 468		}
 469	}
 470
 471	/* XXX scaling is not yet supported, add a check here */
 472
 473	err = tegra_plane_state_add(&tegra->base, new_plane_state);
 474	if (err < 0)
 475		return err;
 476
 477	return 0;
 478}
 479
 480static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 481					      struct drm_atomic_state *state)
 482{
 483	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 484									   plane);
 485	struct tegra_plane *p = to_tegra_plane(plane);
 486	struct tegra_dc *dc;
 487	u32 value;
 488	int err;
 489
 490	/* rien ne va plus */
 491	if (!old_state || !old_state->crtc)
 492		return;
 493
 494	dc = to_tegra_dc(old_state->crtc);
 495
 496	err = host1x_client_resume(&dc->client);
 497	if (err < 0) {
 498		dev_err(dc->dev, "failed to resume: %d\n", err);
 499		return;
 500	}
 501
 502	/*
 503	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
 504	 * on planes that are already disabled. Make sure we fallback to the
 505	 * head for this particular state instead of crashing.
 506	 */
 507	if (WARN_ON(p->dc == NULL))
 508		p->dc = dc;
 509
 510	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
 511	value &= ~WIN_ENABLE;
 512	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
 513
 514	tegra_dc_remove_shared_plane(dc, p);
 515
 516	host1x_client_suspend(&dc->client);
 517}
 518
 519static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
 520{
 521	u64 tmp, tmp1, tmp2;
 522
 523	tmp = (u64)dfixed_trunc(in);
 524	tmp2 = (u64)out;
 525	tmp1 = (tmp << NFB) + (tmp2 >> 1);
 526	do_div(tmp1, tmp2);
 527
 528	return lower_32_bits(tmp1);
 529}
 530
 531static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
 532					     struct drm_atomic_state *state)
 533{
 534	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 535									   plane);
 536	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
 537	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
 538	unsigned int zpos = new_state->normalized_zpos;
 539	struct drm_framebuffer *fb = new_state->fb;
 540	struct tegra_plane *p = to_tegra_plane(plane);
 541	u32 value, min_width, bypass = 0;
 542	dma_addr_t base, addr_flag = 0;
 543	unsigned int bpc;
 544	bool yuv, planar;
 545	int err;
 546
 547	/* rien ne va plus */
 548	if (!new_state->crtc || !new_state->fb)
 549		return;
 550
 551	if (!new_state->visible) {
 552		tegra_shared_plane_atomic_disable(plane, state);
 553		return;
 554	}
 555
 556	err = host1x_client_resume(&dc->client);
 557	if (err < 0) {
 558		dev_err(dc->dev, "failed to resume: %d\n", err);
 559		return;
 560	}
 561
 562	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planar, &bpc);
 563
 564	tegra_dc_assign_shared_plane(dc, p);
 565
 566	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
 567
 568	/* blending */
 569	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
 570		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
 571		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
 572	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
 573
 574	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
 575		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
 576		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
 577	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
 578
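	/*
	 * K1 and K2 are programmed to their maximum (255); the layer depth is
	 * derived from the normalized zpos, and the inversion (255 - zpos)
	 * presumably maps a higher zpos to a smaller depth value, i.e. a layer
	 * closer to the top of the stack.
	 */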
 579	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
 580	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
 581
 582	/* scaling */
 583	min_width = min(new_state->src_w >> 16, new_state->crtc_w);
 584
 585	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);
 586
 587	if (min_width < MAX_PIXELS_5TAP444(value)) {
 588		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
 589	} else {
 590		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);
 591
 592		if (min_width < MAX_PIXELS_2TAP444(value))
 593			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
 594		else
 595			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
 596	}
 597
 598	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
 599	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
 600
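	/*
	 * The horizontal and vertical phase accumulators are only programmed
	 * when the source and destination sizes differ; otherwise the
	 * corresponding scaler stage is bypassed via the usage register below.
	 */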
 601	if (new_state->src_w != new_state->crtc_w << 16) {
 602		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
 603		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
 604		u32 init = (1 << (NFB - 1)) + (incr >> 1);
 605
 606		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
 607		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
 608	} else {
 609		bypass |= INPUT_SCALER_HBYPASS;
 610	}
 611
 612	if (new_state->src_h != new_state->crtc_h << 16) {
 613		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
 614		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
 615		u32 init = (1 << (NFB - 1)) + (incr >> 1);
 616
 617		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
 618		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
 619	} else {
 620		bypass |= INPUT_SCALER_VBYPASS;
 621	}
 622
 623	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
 624
 625	/* disable compression */
 626	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
 627
 628#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 629	/*
 630	 * Physical address bit 39 in Tegra194 is used as a switch for special
 631	 * logic that swizzles the memory using either the legacy Tegra or the
 632	 * dGPU sector layout.
 633	 */
 634	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
 635		addr_flag = BIT_ULL(39);
 636#endif
 637
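	/*
	 * Illustration with a hypothetical address: with the GPU sector
	 * layout, an IOVA of 0x1_2345_6000 is programmed as 0x81_2345_6000,
	 * i.e. with bit 39 set through addr_flag.
	 */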
 638	base = tegra_plane_state->iova[0] + fb->offsets[0];
 639	base |= addr_flag;
 640
 641	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
 642	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
 643
 644	value = V_POSITION(new_state->crtc_y) |
 645		H_POSITION(new_state->crtc_x);
 646	tegra_plane_writel(p, value, DC_WIN_POSITION);
 647
 648	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
 649	tegra_plane_writel(p, value, DC_WIN_SIZE);
 650
 651	value = WIN_ENABLE | COLOR_EXPAND;
 652	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
 653
 654	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
 655	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
 656
 657	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
 658	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
 659
 660	value = PITCH(fb->pitches[0]);
 661	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
 662
 663	if (yuv && planar) {
 664		base = tegra_plane_state->iova[1] + fb->offsets[1];
 665		base |= addr_flag;
 666
 667		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
 668		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);
 669
 670		base = tegra_plane_state->iova[2] + fb->offsets[2];
 671		base |= addr_flag;
 672
 673		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
 674		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
 675
 676		value = PITCH_U(fb->pitches[2]) | PITCH_V(fb->pitches[2]);
 677		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
 678	} else {
 679		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
 680		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
 681		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
 682		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
 683		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
 684	}
 685
 686	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;
 687
 688	if (yuv) {
 689		if (bpc < 12)
 690			value |= DEGAMMA_YUV8_10;
 691		else
 692			value |= DEGAMMA_YUV12;
 693
 694		/* XXX parameterize */
 695		value |= COLOR_SPACE_YUV_2020;
 696	} else {
 697		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
 698			value |= DEGAMMA_SRGB;
 699	}
 700
 701	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
 702
 703	value = OFFSET_X(new_state->src_y >> 16) |
 704		OFFSET_Y(new_state->src_x >> 16);
 705	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
 706
 707	if (dc->soc->supports_block_linear) {
 708		unsigned long height = tegra_plane_state->tiling.value;
 709
 710		/* XXX */
 711		switch (tegra_plane_state->tiling.mode) {
 712		case TEGRA_BO_TILING_MODE_PITCH:
 713			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
 714				DC_WINBUF_SURFACE_KIND_PITCH;
 715			break;
 716
 717		/* XXX not supported on Tegra186 and later */
 718		case TEGRA_BO_TILING_MODE_TILED:
 719			value = DC_WINBUF_SURFACE_KIND_TILED;
 720			break;
 721
 722		case TEGRA_BO_TILING_MODE_BLOCK:
 723			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
 724				DC_WINBUF_SURFACE_KIND_BLOCK;
 725			break;
 726		}
 727
 728		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
 729	}
 730
 731	/* disable gamut CSC */
 732	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
 733	value &= ~CONTROL_CSC_ENABLE;
 734	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
 735
 736	host1x_client_suspend(&dc->client);
 737}
 738
 739static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
 740	.prepare_fb = tegra_plane_prepare_fb,
 741	.cleanup_fb = tegra_plane_cleanup_fb,
 742	.atomic_check = tegra_shared_plane_atomic_check,
 743	.atomic_update = tegra_shared_plane_atomic_update,
 744	.atomic_disable = tegra_shared_plane_atomic_disable,
 745};
 746
 747struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
 748					    struct tegra_dc *dc,
 749					    unsigned int wgrp,
 750					    unsigned int index)
 751{
 752	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
 753	struct tegra_drm *tegra = drm->dev_private;
 754	struct tegra_display_hub *hub = tegra->hub;
 755	struct tegra_shared_plane *plane;
 756	unsigned int possible_crtcs;
 757	unsigned int num_formats;
 758	const u64 *modifiers;
 759	struct drm_plane *p;
 760	const u32 *formats;
 761	int err;
 762
 763	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
 764	if (!plane)
 765		return ERR_PTR(-ENOMEM);
 766
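	/*
	 * The shared windows use register apertures spaced 0x0300 apart,
	 * starting at offset 0x0a00 (inferred from the expression below).
	 */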
 767	plane->base.offset = 0x0a00 + 0x0300 * index;
 768	plane->base.index = index;
 769
 770	plane->wgrp = &hub->wgrps[wgrp];
 771	plane->wgrp->parent = &dc->client;
 772
 773	p = &plane->base.base;
 774
 775	/* planes can be assigned to arbitrary CRTCs */
 776	possible_crtcs = BIT(tegra->num_crtcs) - 1;
 777
 778	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
 779	formats = tegra_shared_plane_formats;
 780	modifiers = tegra_shared_plane_modifiers;
 781
 782	err = drm_universal_plane_init(drm, p, possible_crtcs,
 783				       &tegra_plane_funcs, formats,
 784				       num_formats, modifiers, type, NULL);
 785	if (err < 0) {
 786		kfree(plane);
 787		return ERR_PTR(err);
 788	}
 789
 790	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
 791	drm_plane_create_zpos_property(p, 0, 0, 255);
 792
 793	return p;
 794}
 795
 796static struct drm_private_state *
 797tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
 798{
 799	struct tegra_display_hub_state *state;
 800
 801	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
 802	if (!state)
 803		return NULL;
 804
 805	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
 806
 807	return &state->base;
 808}
 809
 810static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
 811					    struct drm_private_state *state)
 812{
 813	struct tegra_display_hub_state *hub_state =
 814		to_tegra_display_hub_state(state);
 815
 816	kfree(hub_state);
 817}
 818
 819static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
 820	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
 821	.atomic_destroy_state = tegra_display_hub_destroy_state,
 822};
 823
 824static struct tegra_display_hub_state *
 825tegra_display_hub_get_state(struct tegra_display_hub *hub,
 826			    struct drm_atomic_state *state)
 827{
 828	struct drm_private_state *priv;
 829
 830	priv = drm_atomic_get_private_obj_state(state, &hub->base);
 831	if (IS_ERR(priv))
 832		return ERR_CAST(priv);
 833
 834	return to_tegra_display_hub_state(priv);
 835}
 836
 837int tegra_display_hub_atomic_check(struct drm_device *drm,
 838				   struct drm_atomic_state *state)
 839{
 840	struct tegra_drm *tegra = drm->dev_private;
 841	struct tegra_display_hub_state *hub_state;
 842	struct drm_crtc_state *old, *new;
 843	struct drm_crtc *crtc;
 844	unsigned int i;
 845
 846	if (!tegra->hub)
 847		return 0;
 848
 849	hub_state = tegra_display_hub_get_state(tegra->hub, state);
 850	if (IS_ERR(hub_state))
 851		return PTR_ERR(hub_state);
 852
 853	/*
 854	 * The display hub display clock needs to be fed by the display clock
 855	 * with the highest frequency to ensure proper functioning of all the
 856	 * displays.
 857	 *
 858	 * Note that this isn't used before Tegra186, but it doesn't hurt and
 859	 * conditionalizing it would make the code less clean.
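	 *
	 * For example, if two heads are active with 148.5 MHz and 594 MHz
	 * pixel clocks, the loop below selects the 594 MHz head's clock to
	 * feed the hub display clock.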
 860	 */
 861	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
 862		struct tegra_dc_state *dc = to_dc_state(new);
 863
 864		if (new->active) {
 865			if (!hub_state->clk || dc->pclk > hub_state->rate) {
 866				hub_state->dc = to_tegra_dc(dc->base.crtc);
 867				hub_state->clk = hub_state->dc->clk;
 868				hub_state->rate = dc->pclk;
 869			}
 870		}
 871	}
 872
 873	return 0;
 874}
 875
 876static void tegra_display_hub_update(struct tegra_dc *dc)
 877{
 878	u32 value;
 879	int err;
 880
 881	err = host1x_client_resume(&dc->client);
 882	if (err < 0) {
 883		dev_err(dc->dev, "failed to resume: %d\n", err);
 884		return;
 885	}
 886
 887	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
 888	value &= ~LATENCY_EVENT;
 889	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
 890
 891	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
 892	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
 893	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
 894
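	/*
	 * Latch the common (non-window) register updates and request their
	 * activation; the read-backs after each write appear to flush the
	 * posted writes before continuing.
	 */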
 895	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
 896	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 897	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
 898	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 899
 900	host1x_client_suspend(&dc->client);
 901}
 902
 903void tegra_display_hub_atomic_commit(struct drm_device *drm,
 904				     struct drm_atomic_state *state)
 905{
 906	struct tegra_drm *tegra = drm->dev_private;
 907	struct tegra_display_hub *hub = tegra->hub;
 908	struct tegra_display_hub_state *hub_state;
 909	struct device *dev = hub->client.dev;
 910	int err;
 911
 912	hub_state = to_tegra_display_hub_state(hub->base.state);
 913
 914	if (hub_state->clk) {
 915		err = clk_set_rate(hub_state->clk, hub_state->rate);
 916		if (err < 0)
 917			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
 918				hub_state->clk, hub_state->rate);
 919
 920		err = clk_set_parent(hub->clk_disp, hub_state->clk);
 921		if (err < 0)
 922			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
 923				hub->clk_disp, hub_state->clk, err);
 924	}
 925
 926	if (hub_state->dc)
 927		tegra_display_hub_update(hub_state->dc);
 928}
 929
 930static int tegra_display_hub_init(struct host1x_client *client)
 931{
 932	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 933	struct drm_device *drm = dev_get_drvdata(client->host);
 934	struct tegra_drm *tegra = drm->dev_private;
 935	struct tegra_display_hub_state *state;
 936
 937	state = kzalloc(sizeof(*state), GFP_KERNEL);
 938	if (!state)
 939		return -ENOMEM;
 940
 941	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
 942				    &tegra_display_hub_state_funcs);
 943
 944	tegra->hub = hub;
 945
 946	return 0;
 947}
 948
 949static int tegra_display_hub_exit(struct host1x_client *client)
 950{
 951	struct drm_device *drm = dev_get_drvdata(client->host);
 952	struct tegra_drm *tegra = drm->dev_private;
 953
 954	drm_atomic_private_obj_fini(&tegra->hub->base);
 955	tegra->hub = NULL;
 956
 957	return 0;
 958}
 959
 960static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
 961{
 962	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 963	struct device *dev = client->dev;
 964	unsigned int i = hub->num_heads;
 965	int err;
 966
 967	err = reset_control_assert(hub->rst);
 968	if (err < 0)
 969		return err;
 970
 971	while (i--)
 972		clk_disable_unprepare(hub->clk_heads[i]);
 973
 974	clk_disable_unprepare(hub->clk_hub);
 975	clk_disable_unprepare(hub->clk_dsc);
 976	clk_disable_unprepare(hub->clk_disp);
 977
 978	pm_runtime_put_sync(dev);
 979
 980	return 0;
 981}
 982
 983static int tegra_display_hub_runtime_resume(struct host1x_client *client)
 984{
 985	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 986	struct device *dev = client->dev;
 987	unsigned int i;
 988	int err;
 989
 990	err = pm_runtime_resume_and_get(dev);
 991	if (err < 0) {
 992		dev_err(dev, "failed to get runtime PM: %d\n", err);
 993		return err;
 994	}
 995
 996	err = clk_prepare_enable(hub->clk_disp);
 997	if (err < 0)
 998		goto put_rpm;
 999
1000	err = clk_prepare_enable(hub->clk_dsc);
1001	if (err < 0)
1002		goto disable_disp;
1003
1004	err = clk_prepare_enable(hub->clk_hub);
1005	if (err < 0)
1006		goto disable_dsc;
1007
1008	for (i = 0; i < hub->num_heads; i++) {
1009		err = clk_prepare_enable(hub->clk_heads[i]);
1010		if (err < 0)
1011			goto disable_heads;
1012	}
1013
1014	err = reset_control_deassert(hub->rst);
1015	if (err < 0)
1016		goto disable_heads;
1017
1018	return 0;
1019
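	/*
	 * Note: if reset_control_deassert() fails, i equals hub->num_heads at
	 * this point, so the loop below unwinds every head clock that was
	 * enabled.
	 */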
1020disable_heads:
1021	while (i--)
1022		clk_disable_unprepare(hub->clk_heads[i]);
1023
1024	clk_disable_unprepare(hub->clk_hub);
1025disable_dsc:
1026	clk_disable_unprepare(hub->clk_dsc);
1027disable_disp:
1028	clk_disable_unprepare(hub->clk_disp);
1029put_rpm:
1030	pm_runtime_put_sync(dev);
1031	return err;
1032}
1033
1034static const struct host1x_client_ops tegra_display_hub_ops = {
1035	.init = tegra_display_hub_init,
1036	.exit = tegra_display_hub_exit,
1037	.suspend = tegra_display_hub_runtime_suspend,
1038	.resume = tegra_display_hub_runtime_resume,
1039};
1040
1041static int tegra_display_hub_probe(struct platform_device *pdev)
1042{
1043	u64 dma_mask = dma_get_mask(pdev->dev.parent);
1044	struct device_node *child = NULL;
1045	struct tegra_display_hub *hub;
1046	struct clk *clk;
1047	unsigned int i;
1048	int err;
1049
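	/*
	 * Adopt the DMA mask of the parent device (presumably the host1x
	 * controller) for the display hub.
	 */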
1050	err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1051	if (err < 0) {
1052		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1053		return err;
1054	}
1055
1056	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1057	if (!hub)
1058		return -ENOMEM;
1059
1060	hub->soc = of_device_get_match_data(&pdev->dev);
1061
1062	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1063	if (IS_ERR(hub->clk_disp)) {
1064		err = PTR_ERR(hub->clk_disp);
1065		return err;
1066	}
1067
1068	if (hub->soc->supports_dsc) {
1069		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1070		if (IS_ERR(hub->clk_dsc)) {
1071			err = PTR_ERR(hub->clk_dsc);
1072			return err;
1073		}
1074	}
1075
1076	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1077	if (IS_ERR(hub->clk_hub)) {
1078		err = PTR_ERR(hub->clk_hub);
1079		return err;
1080	}
1081
1082	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1083	if (IS_ERR(hub->rst)) {
1084		err = PTR_ERR(hub->rst);
1085		return err;
1086	}
1087
1088	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1089				  sizeof(*hub->wgrps), GFP_KERNEL);
1090	if (!hub->wgrps)
1091		return -ENOMEM;
1092
1093	for (i = 0; i < hub->soc->num_wgrps; i++) {
1094		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1095		char id[8];
1096
1097		snprintf(id, sizeof(id), "wgrp%u", i);
1098		mutex_init(&wgrp->lock);
1099		wgrp->usecount = 0;
1100		wgrp->index = i;
1101
1102		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1103		if (IS_ERR(wgrp->rst))
1104			return PTR_ERR(wgrp->rst);
1105
1106		err = reset_control_assert(wgrp->rst);
1107		if (err < 0)
1108			return err;
1109	}
1110
1111	hub->num_heads = of_get_child_count(pdev->dev.of_node);
1112
1113	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1114				      GFP_KERNEL);
1115	if (!hub->clk_heads)
1116		return -ENOMEM;
1117
1118	for (i = 0; i < hub->num_heads; i++) {
1119		child = of_get_next_child(pdev->dev.of_node, child);
1120		if (!child) {
1121			dev_err(&pdev->dev, "failed to find node for head %u\n",
1122				i);
1123			return -ENODEV;
1124		}
1125
1126		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1127		if (IS_ERR(clk)) {
1128			dev_err(&pdev->dev, "failed to get clock for head %u\n",
1129				i);
1130			of_node_put(child);
1131			return PTR_ERR(clk);
1132		}
1133
1134		hub->clk_heads[i] = clk;
1135	}
1136
1137	of_node_put(child);
1138
1139	/* XXX: enable clock across reset? */
1140	err = reset_control_assert(hub->rst);
1141	if (err < 0)
1142		return err;
1143
1144	platform_set_drvdata(pdev, hub);
1145	pm_runtime_enable(&pdev->dev);
1146
1147	INIT_LIST_HEAD(&hub->client.list);
1148	hub->client.ops = &tegra_display_hub_ops;
1149	hub->client.dev = &pdev->dev;
1150
1151	err = host1x_client_register(&hub->client);
1152	if (err < 0)
1153		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1154			err);
1155
1156	err = devm_of_platform_populate(&pdev->dev);
1157	if (err < 0)
1158		goto unregister;
1159
1160	return err;
1161
1162unregister:
1163	host1x_client_unregister(&hub->client);
1164	pm_runtime_disable(&pdev->dev);
1165	return err;
1166}
1167
1168static int tegra_display_hub_remove(struct platform_device *pdev)
1169{
1170	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1171	unsigned int i;
1172	int err;
1173
1174	err = host1x_client_unregister(&hub->client);
1175	if (err < 0) {
1176		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1177			err);
1178	}
1179
1180	for (i = 0; i < hub->soc->num_wgrps; i++) {
1181		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1182
1183		mutex_destroy(&wgrp->lock);
1184	}
1185
1186	pm_runtime_disable(&pdev->dev);
1187
1188	return err;
1189}
1190
1191static const struct tegra_display_hub_soc tegra186_display_hub = {
1192	.num_wgrps = 6,
1193	.supports_dsc = true,
1194};
1195
1196static const struct tegra_display_hub_soc tegra194_display_hub = {
1197	.num_wgrps = 6,
1198	.supports_dsc = false,
1199};
1200
1201static const struct of_device_id tegra_display_hub_of_match[] = {
1202	{
1203		.compatible = "nvidia,tegra194-display",
1204		.data = &tegra194_display_hub
1205	}, {
1206		.compatible = "nvidia,tegra186-display",
1207		.data = &tegra186_display_hub
1208	}, {
1209		/* sentinel */
1210	}
1211};
1212MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1213
1214struct platform_driver tegra_display_hub_driver = {
1215	.driver = {
1216		.name = "tegra-display-hub",
1217		.of_match_table = tegra_display_hub_of_match,
1218	},
1219	.probe = tegra_display_hub_probe,
1220	.remove = tegra_display_hub_remove,
1221};