   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
   4 */
   5
   6#include <linux/clk.h>
   7#include <linux/delay.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/host1x.h>
  10#include <linux/module.h>
  11#include <linux/of.h>
  12#include <linux/of_graph.h>
  13#include <linux/of_platform.h>
  14#include <linux/platform_device.h>
  15#include <linux/pm_runtime.h>
  16#include <linux/reset.h>
  17
  18#include <drm/drm_atomic.h>
  19#include <drm/drm_atomic_helper.h>
  20#include <drm/drm_blend.h>
  21#include <drm/drm_fourcc.h>
  22#include <drm/drm_framebuffer.h>
  23#include <drm/drm_probe_helper.h>
  24
  25#include "drm.h"
  26#include "dc.h"
  27#include "plane.h"
  28
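     /* the fixed-point scaler phase values computed below use NFB fractional bits */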
  29#define NFB 24
  30
  31static const u32 tegra_shared_plane_formats[] = {
  32	DRM_FORMAT_ARGB1555,
  33	DRM_FORMAT_RGB565,
  34	DRM_FORMAT_RGBA5551,
  35	DRM_FORMAT_ARGB8888,
  36	DRM_FORMAT_ABGR8888,
  37	/* new on Tegra114 */
  38	DRM_FORMAT_ABGR4444,
  39	DRM_FORMAT_ABGR1555,
  40	DRM_FORMAT_BGRA5551,
  41	DRM_FORMAT_XRGB1555,
  42	DRM_FORMAT_RGBX5551,
  43	DRM_FORMAT_XBGR1555,
  44	DRM_FORMAT_BGRX5551,
  45	DRM_FORMAT_BGR565,
  46	DRM_FORMAT_XRGB8888,
  47	DRM_FORMAT_XBGR8888,
   48	/* YUV formats */
  49	DRM_FORMAT_UYVY,
  50	DRM_FORMAT_YUYV,
  51	DRM_FORMAT_YUV420,
  52	DRM_FORMAT_YUV422,
  53};
  54
  55static const u64 tegra_shared_plane_modifiers[] = {
  56	DRM_FORMAT_MOD_LINEAR,
  57	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
  58	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
  59	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
  60	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
  61	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
  62	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
  63	/*
  64	 * The GPU sector layout is only supported on Tegra194, but these will
  65	 * be filtered out later on by ->format_mod_supported() on SoCs where
  66	 * it isn't supported.
  67	 */
  68	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  69	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  70	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  71	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  72	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  73	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  74	/* sentinel */
  75	DRM_FORMAT_MOD_INVALID
  76};
  77
  78static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
  79					      unsigned int offset)
  80{
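     	/*
     	 * Translate a window register offset into this plane's register
     	 * aperture: the 0x500..0x581, 0x700..0x73c and 0x800..0x83e ranges
     	 * are packed at 0x000, 0x180 and 0x1c0 relative to plane->offset.
     	 */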
  81	if (offset >= 0x500 && offset <= 0x581) {
  82		offset = 0x000 + (offset - 0x500);
  83		return plane->offset + offset;
  84	}
  85
  86	if (offset >= 0x700 && offset <= 0x73c) {
  87		offset = 0x180 + (offset - 0x700);
  88		return plane->offset + offset;
  89	}
  90
  91	if (offset >= 0x800 && offset <= 0x83e) {
  92		offset = 0x1c0 + (offset - 0x800);
  93		return plane->offset + offset;
  94	}
  95
  96	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
  97
  98	return plane->offset + offset;
  99}
 100
 101static inline u32 tegra_plane_readl(struct tegra_plane *plane,
 102				    unsigned int offset)
 103{
 104	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
 105}
 106
 107static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
 108				      unsigned int offset)
 109{
 110	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
 111}
 112
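     /*
      * Enable a window group, keeping a use count: on first use, resume the
      * parent display controller through host1x and release the window group
      * reset.
      */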
 113static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
 114{
 115	int err = 0;
 116
 117	mutex_lock(&wgrp->lock);
 118
 119	if (wgrp->usecount == 0) {
 120		err = host1x_client_resume(wgrp->parent);
 121		if (err < 0) {
 122			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
 123			goto unlock;
 124		}
 125
 126		reset_control_deassert(wgrp->rst);
 127	}
 128
 129	wgrp->usecount++;
 130
 131unlock:
 132	mutex_unlock(&wgrp->lock);
 133	return err;
 134}
 135
 136static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
 137{
 138	int err;
 139
 140	mutex_lock(&wgrp->lock);
 141
 142	if (wgrp->usecount == 1) {
 143		err = reset_control_assert(wgrp->rst);
 144		if (err < 0) {
 145			pr_err("failed to assert reset for window group %u\n",
 146			       wgrp->index);
 147		}
 148
 149		host1x_client_suspend(wgrp->parent);
 150	}
 151
 152	wgrp->usecount--;
 153	mutex_unlock(&wgrp->lock);
 154}
 155
 156int tegra_display_hub_prepare(struct tegra_display_hub *hub)
 157{
 158	unsigned int i;
 159
 160	/*
 161	 * XXX Enabling/disabling windowgroups needs to happen when the owner
 162	 * display controller is disabled. There's currently no good point at
 163	 * which this could be executed, so unconditionally enable all window
 164	 * groups for now.
 165	 */
 166	for (i = 0; i < hub->soc->num_wgrps; i++) {
 167		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 168
 169		/* Skip orphaned window group whose parent DC is disabled */
 170		if (wgrp->parent)
 171			tegra_windowgroup_enable(wgrp);
 172	}
 173
 174	return 0;
 175}
 176
 177void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
 178{
 179	unsigned int i;
 180
 181	/*
  182	 * XXX Remove this once window groups can be enabled and disabled at
  183	 * a finer granularity.
 184	 */
 185	for (i = 0; i < hub->soc->num_wgrps; i++) {
 186		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 187
 188		/* Skip orphaned window group whose parent DC is disabled */
 189		if (wgrp->parent)
 190			tegra_windowgroup_disable(wgrp);
 191	}
 192}
 193
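     /*
      * Trigger a state update for this window and wait (for up to one
      * second) for the hardware to clear the corresponding bits in
      * DC_CMD_STATE_CONTROL.
      */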
 194static void tegra_shared_plane_update(struct tegra_plane *plane)
 195{
 196	struct tegra_dc *dc = plane->dc;
 197	unsigned long timeout;
 198	u32 mask, value;
 199
 200	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
 201	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
 202
 203	timeout = jiffies + msecs_to_jiffies(1000);
 204
 205	while (time_before(jiffies, timeout)) {
 206		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 207		if ((value & mask) == 0)
 208			break;
 209
 210		usleep_range(100, 400);
 211	}
 212}
 213
 214static void tegra_shared_plane_activate(struct tegra_plane *plane)
 215{
 216	struct tegra_dc *dc = plane->dc;
 217	unsigned long timeout;
 218	u32 mask, value;
 219
 220	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
 221	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
 222
 223	timeout = jiffies + msecs_to_jiffies(1000);
 224
 225	while (time_before(jiffies, timeout)) {
 226		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 227		if ((value & mask) == 0)
 228			break;
 229
 230		usleep_range(100, 400);
 231	}
 232}
 233
 234static unsigned int
 235tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
 236{
 237	unsigned int offset =
 238		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
 239
 240	return tegra_dc_readl(dc, offset) & OWNER_MASK;
 241}
 242
 243static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
 244				       struct tegra_plane *plane)
 245{
 246	struct device *dev = dc->dev;
 247
 248	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
 249		if (plane->dc == dc)
 250			return true;
 251
 252		dev_WARN(dev, "head %u owns window %u but is not attached\n",
 253			 dc->pipe, plane->index);
 254	}
 255
 256	return false;
 257}
 258
 259static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
 260					struct tegra_dc *new)
 261{
 262	unsigned int offset =
 263		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
 264	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
 265	struct device *dev = new ? new->dev : old->dev;
 266	unsigned int owner, index = plane->index;
 267	u32 value;
 268
 269	value = tegra_dc_readl(dc, offset);
 270	owner = value & OWNER_MASK;
 271
 272	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
 273		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
 274		return -EBUSY;
 275	}
 276
 277	/*
 278	 * This seems to happen whenever the head has been disabled with one
 279	 * or more windows being active. This is harmless because we'll just
 280	 * reassign the window to the new head anyway.
 281	 */
 282	if (old && owner == OWNER_MASK)
 283		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
 284			old->pipe, owner);
 285
 286	value &= ~OWNER_MASK;
 287
 288	if (new)
 289		value |= OWNER(new->pipe);
 290	else
 291		value |= OWNER_MASK;
 292
 293	tegra_dc_writel(dc, value, offset);
 294
 295	plane->dc = new;
 296
 297	return 0;
 298}
 299
 300static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
 301{
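     	/*
     	 * Input scaler coefficient table: 3 scaling ratios, each with 16
     	 * rows of 4 coefficient words (192 entries), indexed below as
     	 * (ratio << 6) + (row << 2) + column.
     	 */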
 302	static const unsigned int coeffs[192] = {
 303		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
 304		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
 305		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
 306		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
 307		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
 308		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
 309		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
 310		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
 311		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
 312		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
 313		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
 314		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
 315		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
 316		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
 317		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
 318		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
 319		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
 320		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
 321		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
 322		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
 323		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
 324		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
 325		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
 326		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
 327		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
 328		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
 329		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
 330		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
 331		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
 332		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
 333		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
 334		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
 335		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
 336		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
 337		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
 338		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
 339		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
 340		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
 341		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
 342		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
 343		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
 344		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
 345		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
 346		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
 347		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
 348		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
 349		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
 350		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
 351	};
 352	unsigned int ratio, row, column;
 353
 354	for (ratio = 0; ratio <= 2; ratio++) {
 355		for (row = 0; row <= 15; row++) {
 356			for (column = 0; column <= 3; column++) {
 357				unsigned int index = (ratio << 6) + (row << 2) + column;
 358				u32 value;
 359
 360				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
 361				tegra_plane_writel(plane, value,
 362						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
 363			}
 364		}
 365	}
 366}
 367
 368static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
 369					 struct tegra_plane *plane)
 370{
 371	u32 value;
 372	int err;
 373
 374	if (!tegra_dc_owns_shared_plane(dc, plane)) {
 375		err = tegra_shared_plane_set_owner(plane, dc);
 376		if (err < 0)
 377			return;
 378	}
 379
 380	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
 381	value |= MODE_FOUR_LINES;
 382	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
 383
 384	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
 385	value = SLOTS(1);
 386	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
 387
 388	/* disable watermark */
 389	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
 390	value &= ~LATENCY_CTL_MODE_ENABLE;
 391	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
 392
 393	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
 394	value |= WATERMARK_MASK;
 395	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
 396
 397	/* pipe meter */
 398	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
 399	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
 400	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
 401
 402	/* mempool entries */
 403	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
 404	value = MEMPOOL_ENTRIES(0x331);
 405	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
 406
 407	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
 408	value &= ~THREAD_NUM_MASK;
 409	value |= THREAD_NUM(plane->base.index);
 410	value |= THREAD_GROUP_ENABLE;
 411	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
 412
 413	tegra_shared_plane_setup_scaler(plane);
 414
 415	tegra_shared_plane_update(plane);
 416	tegra_shared_plane_activate(plane);
 417}
 418
 419static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
 420					 struct tegra_plane *plane)
 421{
 422	tegra_shared_plane_set_owner(plane, NULL);
 423}
 424
 425static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 426					   struct drm_atomic_state *state)
 427{
 428	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 429										 plane);
 430	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
 431	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
 432	struct tegra_bo_tiling *tiling = &plane_state->tiling;
 433	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
 434	int err;
 435
 436	/* no need for further checks if the plane is being disabled */
 437	if (!new_plane_state->crtc || !new_plane_state->fb)
 438		return 0;
 439
 440	err = tegra_plane_format(new_plane_state->fb->format->format,
 441				 &plane_state->format,
 442				 &plane_state->swap);
 443	if (err < 0)
 444		return err;
 445
 446	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
 447	if (err < 0)
 448		return err;
 449
 450	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
 451	    !dc->soc->supports_block_linear) {
 452		DRM_ERROR("hardware doesn't support block linear mode\n");
 453		return -EINVAL;
 454	}
 455
 456	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
 457	    !dc->soc->supports_sector_layout) {
 458		DRM_ERROR("hardware doesn't support GPU sector layout\n");
 459		return -EINVAL;
 460	}
 461
 462	/*
 463	 * Tegra doesn't support different strides for U and V planes so we
 464	 * error out if the user tries to display a framebuffer with such a
 465	 * configuration.
 466	 */
 467	if (new_plane_state->fb->format->num_planes > 2) {
 468		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
 469			DRM_ERROR("unsupported UV-plane configuration\n");
 470			return -EINVAL;
 471		}
 472	}
 473
 474	/* XXX scaling is not yet supported, add a check here */
 475
 476	err = tegra_plane_state_add(&tegra->base, new_plane_state);
 477	if (err < 0)
 478		return err;
 479
 480	return 0;
 481}
 482
 483static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 484					      struct drm_atomic_state *state)
 485{
 486	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 487									   plane);
 488	struct tegra_plane *p = to_tegra_plane(plane);
 489	struct tegra_dc *dc;
 490	u32 value;
 491	int err;
 492
  493	/* nothing to do if the plane was not attached to a CRTC */
 494	if (!old_state || !old_state->crtc)
 495		return;
 496
 497	dc = to_tegra_dc(old_state->crtc);
 498
 499	err = host1x_client_resume(&dc->client);
 500	if (err < 0) {
 501		dev_err(dc->dev, "failed to resume: %d\n", err);
 502		return;
 503	}
 504
 505	/*
 506	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
  507	 * on planes that are already disabled. Make sure we fall back to the
 508	 * head for this particular state instead of crashing.
 509	 */
 510	if (WARN_ON(p->dc == NULL))
 511		p->dc = dc;
 512
 513	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
 514	value &= ~WIN_ENABLE;
 515	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
 516
 517	tegra_dc_remove_shared_plane(dc, p);
 518
 519	host1x_client_suspend(&dc->client);
 520}
 521
 522static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
 523{
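     	/*
     	 * Compute the scaler phase increment: the truncated source size
     	 * divided by the destination size as a fixed-point value with NFB
     	 * fractional bits, rounded to nearest by adding out / 2 before the
     	 * division.
     	 */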
 524	u64 tmp, tmp1;
 525
 526	tmp = (u64)dfixed_trunc(in);
 527	tmp1 = (tmp << NFB) + ((u64)out >> 1);
 528	do_div(tmp1, out);
 529
 530	return lower_32_bits(tmp1);
 531}
 532
 533static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
 534					     struct drm_atomic_state *state)
 535{
 536	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 537									   plane);
 538	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
 539	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
 540	unsigned int zpos = new_state->normalized_zpos;
 541	struct drm_framebuffer *fb = new_state->fb;
 542	struct tegra_plane *p = to_tegra_plane(plane);
 543	u32 value, min_width, bypass = 0;
 544	dma_addr_t base, addr_flag = 0;
 545	unsigned int bpc, planes;
 546	bool yuv;
 547	int err;
 548
  549	/* nothing to do without a CRTC and framebuffer attached */
 550	if (!new_state->crtc || !new_state->fb)
 551		return;
 552
 553	if (!new_state->visible) {
 554		tegra_shared_plane_atomic_disable(plane, state);
 555		return;
 556	}
 557
 558	err = host1x_client_resume(&dc->client);
 559	if (err < 0) {
 560		dev_err(dc->dev, "failed to resume: %d\n", err);
 561		return;
 562	}
 563
 564	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);
 565
 566	tegra_dc_assign_shared_plane(dc, p);
 567
 568	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
 569
 570	/* blending */
 571	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
 572		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
 573		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
 574	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
 575
 576	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
 577		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
 578		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
 579	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
 580
 581	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
 582	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
 583
 584	/* scaling */
 585	min_width = min(new_state->src_w >> 16, new_state->crtc_w);
 586
 587	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);
 588
 589	if (min_width < MAX_PIXELS_5TAP444(value)) {
 590		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
 591	} else {
 592		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);
 593
 594		if (min_width < MAX_PIXELS_2TAP444(value))
 595			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
 596		else
 597			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
 598	}
 599
 600	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
 601	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
 602
 603	if (new_state->src_w != new_state->crtc_w << 16) {
 604		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
 605		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
 606		u32 init = (1 << (NFB - 1)) + (incr >> 1);
 607
 608		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
 609		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
 610	} else {
 611		bypass |= INPUT_SCALER_HBYPASS;
 612	}
 613
 614	if (new_state->src_h != new_state->crtc_h << 16) {
 615		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
 616		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
 617		u32 init = (1 << (NFB - 1)) + (incr >> 1);
 618
 619		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
 620		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
 621	} else {
 622		bypass |= INPUT_SCALER_VBYPASS;
 623	}
 624
 625	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
 626
 627	/* disable compression */
 628	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
 629
 630#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 631	/*
 632	 * Physical address bit 39 in Tegra194 is used as a switch for special
 633	 * logic that swizzles the memory using either the legacy Tegra or the
 634	 * dGPU sector layout.
 635	 */
 636	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
 637		addr_flag = BIT_ULL(39);
 638#endif
 639
 640	base = tegra_plane_state->iova[0] + fb->offsets[0];
 641	base |= addr_flag;
 642
 643	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
 644	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
 645
 646	value = V_POSITION(new_state->crtc_y) |
 647		H_POSITION(new_state->crtc_x);
 648	tegra_plane_writel(p, value, DC_WIN_POSITION);
 649
 650	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
 651	tegra_plane_writel(p, value, DC_WIN_SIZE);
 652
 653	value = WIN_ENABLE | COLOR_EXPAND;
 654	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
 655
 656	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
 657	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
 658
 659	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
 660	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
 661
 662	value = PITCH(fb->pitches[0]);
 663	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
 664
 665	if (yuv && planes > 1) {
 666		base = tegra_plane_state->iova[1] + fb->offsets[1];
 667		base |= addr_flag;
 668
 669		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
 670		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);
 671
 672		if (planes > 2) {
 673			base = tegra_plane_state->iova[2] + fb->offsets[2];
 674			base |= addr_flag;
 675
 676			tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
 677			tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
 678		}
 679
 680		value = PITCH_U(fb->pitches[1]);
 681
 682		if (planes > 2)
 683			value |= PITCH_V(fb->pitches[2]);
 684
 685		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
 686	} else {
 687		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
 688		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
 689		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
 690		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
 691		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
 692	}
 693
 694	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;
 695
 696	if (yuv) {
 697		if (bpc < 12)
 698			value |= DEGAMMA_YUV8_10;
 699		else
 700			value |= DEGAMMA_YUV12;
 701
 702		/* XXX parameterize */
 703		value |= COLOR_SPACE_YUV_2020;
 704	} else {
 705		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
 706			value |= DEGAMMA_SRGB;
 707	}
 708
 709	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
 710
 711	value = OFFSET_X(new_state->src_y >> 16) |
 712		OFFSET_Y(new_state->src_x >> 16);
 713	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
 714
 715	if (dc->soc->supports_block_linear) {
 716		unsigned long height = tegra_plane_state->tiling.value;
 717
 718		/* XXX */
 719		switch (tegra_plane_state->tiling.mode) {
 720		case TEGRA_BO_TILING_MODE_PITCH:
 721			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
 722				DC_WINBUF_SURFACE_KIND_PITCH;
 723			break;
 724
 725		/* XXX not supported on Tegra186 and later */
 726		case TEGRA_BO_TILING_MODE_TILED:
 727			value = DC_WINBUF_SURFACE_KIND_TILED;
 728			break;
 729
 730		case TEGRA_BO_TILING_MODE_BLOCK:
 731			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
 732				DC_WINBUF_SURFACE_KIND_BLOCK;
 733			break;
 734		}
 735
 736		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
 737	}
 738
 739	/* disable gamut CSC */
 740	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
 741	value &= ~CONTROL_CSC_ENABLE;
 742	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
 743
 744	host1x_client_suspend(&dc->client);
 745}
 746
 747static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
 748	.prepare_fb = tegra_plane_prepare_fb,
 749	.cleanup_fb = tegra_plane_cleanup_fb,
 750	.atomic_check = tegra_shared_plane_atomic_check,
 751	.atomic_update = tegra_shared_plane_atomic_update,
 752	.atomic_disable = tegra_shared_plane_atomic_disable,
 753};
 754
 755struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
 756					    struct tegra_dc *dc,
 757					    unsigned int wgrp,
 758					    unsigned int index)
 759{
 760	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
 761	struct tegra_drm *tegra = drm->dev_private;
 762	struct tegra_display_hub *hub = tegra->hub;
 763	struct tegra_shared_plane *plane;
 764	unsigned int possible_crtcs;
 765	unsigned int num_formats;
 766	const u64 *modifiers;
 767	struct drm_plane *p;
 768	const u32 *formats;
 769	int err;
 770
 771	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
 772	if (!plane)
 773		return ERR_PTR(-ENOMEM);
 774
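     	/* register aperture for this window, as used by tegra_plane_offset() */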
 775	plane->base.offset = 0x0a00 + 0x0300 * index;
 776	plane->base.index = index;
 777
 778	plane->wgrp = &hub->wgrps[wgrp];
 779	plane->wgrp->parent = &dc->client;
 780
 781	p = &plane->base.base;
 782
 783	/* planes can be assigned to arbitrary CRTCs */
 784	possible_crtcs = BIT(tegra->num_crtcs) - 1;
 785
 786	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
 787	formats = tegra_shared_plane_formats;
 788	modifiers = tegra_shared_plane_modifiers;
 789
 790	err = drm_universal_plane_init(drm, p, possible_crtcs,
 791				       &tegra_plane_funcs, formats,
 792				       num_formats, modifiers, type, NULL);
 793	if (err < 0) {
 794		kfree(plane);
 795		return ERR_PTR(err);
 796	}
 797
 798	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
 799	drm_plane_create_zpos_property(p, 0, 0, 255);
 800
 801	return p;
 802}
 803
 804static struct drm_private_state *
 805tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
 806{
 807	struct tegra_display_hub_state *state;
 808
 809	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
 810	if (!state)
 811		return NULL;
 812
 813	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
 814
 815	return &state->base;
 816}
 817
 818static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
 819					    struct drm_private_state *state)
 820{
 821	struct tegra_display_hub_state *hub_state =
 822		to_tegra_display_hub_state(state);
 823
 824	kfree(hub_state);
 825}
 826
 827static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
 828	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
 829	.atomic_destroy_state = tegra_display_hub_destroy_state,
 830};
 831
 832static struct tegra_display_hub_state *
 833tegra_display_hub_get_state(struct tegra_display_hub *hub,
 834			    struct drm_atomic_state *state)
 835{
 836	struct drm_private_state *priv;
 837
 838	priv = drm_atomic_get_private_obj_state(state, &hub->base);
 839	if (IS_ERR(priv))
 840		return ERR_CAST(priv);
 841
 842	return to_tegra_display_hub_state(priv);
 843}
 844
 845int tegra_display_hub_atomic_check(struct drm_device *drm,
 846				   struct drm_atomic_state *state)
 847{
 848	struct tegra_drm *tegra = drm->dev_private;
 849	struct tegra_display_hub_state *hub_state;
 850	struct drm_crtc_state *old, *new;
 851	struct drm_crtc *crtc;
 852	unsigned int i;
 853
 854	if (!tegra->hub)
 855		return 0;
 856
 857	hub_state = tegra_display_hub_get_state(tegra->hub, state);
 858	if (IS_ERR(hub_state))
 859		return PTR_ERR(hub_state);
 860
 861	/*
 862	 * The display hub display clock needs to be fed by the display clock
 863	 * with the highest frequency to ensure proper functioning of all the
 864	 * displays.
 865	 *
  866	 * Note that this isn't used before Tegra186, but it doesn't hurt and
  867	 * making it conditional would only make the code less clean.
 868	 */
 869	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
 870		struct tegra_dc_state *dc = to_dc_state(new);
 871
 872		if (new->active) {
 873			if (!hub_state->clk || dc->pclk > hub_state->rate) {
 874				hub_state->dc = to_tegra_dc(dc->base.crtc);
 875				hub_state->clk = hub_state->dc->clk;
 876				hub_state->rate = dc->pclk;
 877			}
 878		}
 879	}
 880
 881	return 0;
 882}
 883
 884static void tegra_display_hub_update(struct tegra_dc *dc)
 885{
 886	u32 value;
 887	int err;
 888
 889	err = host1x_client_resume(&dc->client);
 890	if (err < 0) {
 891		dev_err(dc->dev, "failed to resume: %d\n", err);
 892		return;
 893	}
 894
 895	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
 896	value &= ~LATENCY_EVENT;
 897	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
 898
 899	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
 900	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
 901	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
 902
 903	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
 904	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 905	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
 906	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 907
 908	host1x_client_suspend(&dc->client);
 909}
 910
 911void tegra_display_hub_atomic_commit(struct drm_device *drm,
 912				     struct drm_atomic_state *state)
 913{
 914	struct tegra_drm *tegra = drm->dev_private;
 915	struct tegra_display_hub *hub = tegra->hub;
 916	struct tegra_display_hub_state *hub_state;
 917	struct device *dev = hub->client.dev;
 918	int err;
 919
 920	hub_state = to_tegra_display_hub_state(hub->base.state);
 921
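     	/*
     	 * Clock the hub from the display controller with the highest pixel
     	 * clock, as selected in tegra_display_hub_atomic_check().
     	 */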
 922	if (hub_state->clk) {
 923		err = clk_set_rate(hub_state->clk, hub_state->rate);
 924		if (err < 0)
 925			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
 926				hub_state->clk, hub_state->rate);
 927
 928		err = clk_set_parent(hub->clk_disp, hub_state->clk);
 929		if (err < 0)
 930			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
 931				hub->clk_disp, hub_state->clk, err);
 932	}
 933
 934	if (hub_state->dc)
 935		tegra_display_hub_update(hub_state->dc);
 936}
 937
 938static int tegra_display_hub_init(struct host1x_client *client)
 939{
 940	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 941	struct drm_device *drm = dev_get_drvdata(client->host);
 942	struct tegra_drm *tegra = drm->dev_private;
 943	struct tegra_display_hub_state *state;
 944
 945	state = kzalloc(sizeof(*state), GFP_KERNEL);
 946	if (!state)
 947		return -ENOMEM;
 948
 949	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
 950				    &tegra_display_hub_state_funcs);
 951
 952	tegra->hub = hub;
 953
 954	return 0;
 955}
 956
 957static int tegra_display_hub_exit(struct host1x_client *client)
 958{
 959	struct drm_device *drm = dev_get_drvdata(client->host);
 960	struct tegra_drm *tegra = drm->dev_private;
 961
 962	drm_atomic_private_obj_fini(&tegra->hub->base);
 963	tegra->hub = NULL;
 964
 965	return 0;
 966}
 967
 968static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
 969{
 970	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 971	struct device *dev = client->dev;
 972	unsigned int i = hub->num_heads;
 973	int err;
 974
 975	err = reset_control_assert(hub->rst);
 976	if (err < 0)
 977		return err;
 978
 979	while (i--)
 980		clk_disable_unprepare(hub->clk_heads[i]);
 981
 982	clk_disable_unprepare(hub->clk_hub);
 983	clk_disable_unprepare(hub->clk_dsc);
 984	clk_disable_unprepare(hub->clk_disp);
 985
 986	pm_runtime_put_sync(dev);
 987
 988	return 0;
 989}
 990
 991static int tegra_display_hub_runtime_resume(struct host1x_client *client)
 992{
 993	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 994	struct device *dev = client->dev;
 995	unsigned int i;
 996	int err;
 997
 998	err = pm_runtime_resume_and_get(dev);
 999	if (err < 0) {
1000		dev_err(dev, "failed to get runtime PM: %d\n", err);
1001		return err;
1002	}
1003
1004	err = clk_prepare_enable(hub->clk_disp);
1005	if (err < 0)
1006		goto put_rpm;
1007
1008	err = clk_prepare_enable(hub->clk_dsc);
1009	if (err < 0)
1010		goto disable_disp;
1011
1012	err = clk_prepare_enable(hub->clk_hub);
1013	if (err < 0)
1014		goto disable_dsc;
1015
1016	for (i = 0; i < hub->num_heads; i++) {
1017		err = clk_prepare_enable(hub->clk_heads[i]);
1018		if (err < 0)
1019			goto disable_heads;
1020	}
1021
1022	err = reset_control_deassert(hub->rst);
1023	if (err < 0)
1024		goto disable_heads;
1025
1026	return 0;
1027
1028disable_heads:
1029	while (i--)
1030		clk_disable_unprepare(hub->clk_heads[i]);
1031
1032	clk_disable_unprepare(hub->clk_hub);
1033disable_dsc:
1034	clk_disable_unprepare(hub->clk_dsc);
1035disable_disp:
1036	clk_disable_unprepare(hub->clk_disp);
1037put_rpm:
1038	pm_runtime_put_sync(dev);
1039	return err;
1040}
1041
1042static const struct host1x_client_ops tegra_display_hub_ops = {
1043	.init = tegra_display_hub_init,
1044	.exit = tegra_display_hub_exit,
1045	.suspend = tegra_display_hub_runtime_suspend,
1046	.resume = tegra_display_hub_runtime_resume,
1047};
1048
1049static int tegra_display_hub_probe(struct platform_device *pdev)
1050{
1051	u64 dma_mask = dma_get_mask(pdev->dev.parent);
1052	struct device_node *child = NULL;
1053	struct tegra_display_hub *hub;
1054	struct clk *clk;
1055	unsigned int i;
1056	int err;
1057
1058	err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1059	if (err < 0) {
1060		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1061		return err;
1062	}
1063
1064	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1065	if (!hub)
1066		return -ENOMEM;
1067
1068	hub->soc = of_device_get_match_data(&pdev->dev);
1069
1070	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1071	if (IS_ERR(hub->clk_disp)) {
1072		err = PTR_ERR(hub->clk_disp);
1073		return err;
1074	}
1075
1076	if (hub->soc->supports_dsc) {
1077		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1078		if (IS_ERR(hub->clk_dsc)) {
1079			err = PTR_ERR(hub->clk_dsc);
1080			return err;
1081		}
1082	}
1083
1084	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1085	if (IS_ERR(hub->clk_hub)) {
1086		err = PTR_ERR(hub->clk_hub);
1087		return err;
1088	}
1089
1090	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1091	if (IS_ERR(hub->rst)) {
1092		err = PTR_ERR(hub->rst);
1093		return err;
1094	}
1095
1096	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1097				  sizeof(*hub->wgrps), GFP_KERNEL);
1098	if (!hub->wgrps)
1099		return -ENOMEM;
1100
1101	for (i = 0; i < hub->soc->num_wgrps; i++) {
1102		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1103		char id[16];
1104
1105		snprintf(id, sizeof(id), "wgrp%u", i);
1106		mutex_init(&wgrp->lock);
1107		wgrp->usecount = 0;
1108		wgrp->index = i;
1109
1110		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1111		if (IS_ERR(wgrp->rst))
1112			return PTR_ERR(wgrp->rst);
1113
1114		err = reset_control_assert(wgrp->rst);
1115		if (err < 0)
1116			return err;
1117	}
1118
1119	hub->num_heads = of_get_child_count(pdev->dev.of_node);
1120
1121	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1122				      GFP_KERNEL);
1123	if (!hub->clk_heads)
1124		return -ENOMEM;
1125
1126	for (i = 0; i < hub->num_heads; i++) {
1127		child = of_get_next_child(pdev->dev.of_node, child);
1128		if (!child) {
1129			dev_err(&pdev->dev, "failed to find node for head %u\n",
1130				i);
1131			return -ENODEV;
1132		}
1133
1134		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1135		if (IS_ERR(clk)) {
1136			dev_err(&pdev->dev, "failed to get clock for head %u\n",
1137				i);
1138			of_node_put(child);
1139			return PTR_ERR(clk);
1140		}
1141
1142		hub->clk_heads[i] = clk;
1143	}
1144
1145	of_node_put(child);
1146
1147	/* XXX: enable clock across reset? */
1148	err = reset_control_assert(hub->rst);
1149	if (err < 0)
1150		return err;
1151
1152	platform_set_drvdata(pdev, hub);
1153	pm_runtime_enable(&pdev->dev);
1154
1155	INIT_LIST_HEAD(&hub->client.list);
1156	hub->client.ops = &tegra_display_hub_ops;
1157	hub->client.dev = &pdev->dev;
1158
1159	err = host1x_client_register(&hub->client);
1160	if (err < 0)
1161		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1162			err);
1163
1164	err = devm_of_platform_populate(&pdev->dev);
1165	if (err < 0)
1166		goto unregister;
1167
1168	return err;
1169
1170unregister:
1171	host1x_client_unregister(&hub->client);
1172	pm_runtime_disable(&pdev->dev);
1173	return err;
1174}
1175
1176static void tegra_display_hub_remove(struct platform_device *pdev)
1177{
1178	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1179	unsigned int i;
1180
1181	host1x_client_unregister(&hub->client);
1182
1183	for (i = 0; i < hub->soc->num_wgrps; i++) {
1184		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1185
1186		mutex_destroy(&wgrp->lock);
1187	}
1188
1189	pm_runtime_disable(&pdev->dev);
1190}
1191
1192static const struct tegra_display_hub_soc tegra186_display_hub = {
1193	.num_wgrps = 6,
1194	.supports_dsc = true,
1195};
1196
1197static const struct tegra_display_hub_soc tegra194_display_hub = {
1198	.num_wgrps = 6,
1199	.supports_dsc = false,
1200};
1201
1202static const struct of_device_id tegra_display_hub_of_match[] = {
1203	{
1204		.compatible = "nvidia,tegra194-display",
1205		.data = &tegra194_display_hub
1206	}, {
1207		.compatible = "nvidia,tegra186-display",
1208		.data = &tegra186_display_hub
1209	}, {
1210		/* sentinel */
1211	}
1212};
1213MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1214
1215struct platform_driver tegra_display_hub_driver = {
1216	.driver = {
1217		.name = "tegra-display-hub",
1218		.of_match_table = tegra_display_hub_of_match,
1219	},
1220	.probe = tegra_display_hub_probe,
1221	.remove = tegra_display_hub_remove,
1222};