v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
  4 */
  5
  6#include <linux/clk.h>
  7#include <linux/delay.h>
  8#include <linux/host1x.h>
  9#include <linux/module.h>
 10#include <linux/of.h>
 11#include <linux/of_device.h>
 12#include <linux/of_graph.h>
 13#include <linux/platform_device.h>
 14#include <linux/pm_runtime.h>
 15#include <linux/reset.h>
 16
 17#include <drm/drm_atomic.h>
 18#include <drm/drm_atomic_helper.h>
 19#include <drm/drm_fourcc.h>
 20#include <drm/drm_probe_helper.h>
 21
 22#include "drm.h"
 23#include "dc.h"
 24#include "plane.h"
 25
 26static const u32 tegra_shared_plane_formats[] = {
 27	DRM_FORMAT_ARGB1555,
 28	DRM_FORMAT_RGB565,
 29	DRM_FORMAT_RGBA5551,
 30	DRM_FORMAT_ARGB8888,
 31	DRM_FORMAT_ABGR8888,
 32	/* new on Tegra114 */
 33	DRM_FORMAT_ABGR4444,
 34	DRM_FORMAT_ABGR1555,
 35	DRM_FORMAT_BGRA5551,
 36	DRM_FORMAT_XRGB1555,
 37	DRM_FORMAT_RGBX5551,
 38	DRM_FORMAT_XBGR1555,
 39	DRM_FORMAT_BGRX5551,
 40	DRM_FORMAT_BGR565,
 41	DRM_FORMAT_XRGB8888,
 42	DRM_FORMAT_XBGR8888,
 43	/* planar formats */
 44	DRM_FORMAT_UYVY,
 45	DRM_FORMAT_YUYV,
 46	DRM_FORMAT_YUV420,
 47	DRM_FORMAT_YUV422,
 48};
 49
 50static const u64 tegra_shared_plane_modifiers[] = {
 51	DRM_FORMAT_MOD_LINEAR,
 52	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
 53	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
 54	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
 55	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
 56	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
 57	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
 58	DRM_FORMAT_MOD_INVALID
 59};
 60
 61static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
 62					      unsigned int offset)
 63{
 64	if (offset >= 0x500 && offset <= 0x581) {
 65		offset = 0x000 + (offset - 0x500);
 66		return plane->offset + offset;
 67	}
 68
 69	if (offset >= 0x700 && offset <= 0x73c) {
 70		offset = 0x180 + (offset - 0x700);
 71		return plane->offset + offset;
 72	}
 73
 74	if (offset >= 0x800 && offset <= 0x83e) {
 75		offset = 0x1c0 + (offset - 0x800);
 76		return plane->offset + offset;
 77	}
 78
 79	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
 80
 81	return plane->offset + offset;
 82}
 83
 84static inline u32 tegra_plane_readl(struct tegra_plane *plane,
 85				    unsigned int offset)
 86{
 87	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
 88}
 89
 90static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
 91				      unsigned int offset)
 92{
 93	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
 94}
 95
 96static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
 97{
 98	mutex_lock(&wgrp->lock);
 99
100	if (wgrp->usecount == 0) {
101		pm_runtime_get_sync(wgrp->parent);
102		reset_control_deassert(wgrp->rst);
103	}
104
105	wgrp->usecount++;
106	mutex_unlock(&wgrp->lock);
107
108	return 0;
109}
110
111static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
112{
113	int err;
114
115	mutex_lock(&wgrp->lock);
116
117	if (wgrp->usecount == 1) {
118		err = reset_control_assert(wgrp->rst);
119		if (err < 0) {
120			pr_err("failed to assert reset for window group %u\n",
121			       wgrp->index);
122		}
123
124		pm_runtime_put(wgrp->parent);
125	}
126
127	wgrp->usecount--;
128	mutex_unlock(&wgrp->lock);
129}
130
131int tegra_display_hub_prepare(struct tegra_display_hub *hub)
132{
133	unsigned int i;
134
135	/*
136	 * XXX Enabling/disabling windowgroups needs to happen when the owner
137	 * display controller is disabled. There's currently no good point at
138	 * which this could be executed, so unconditionally enable all window
139	 * groups for now.
140	 */
141	for (i = 0; i < hub->soc->num_wgrps; i++) {
142		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
143
144		tegra_windowgroup_enable(wgrp);
145	}
146
147	return 0;
148}
149
150void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
151{
152	unsigned int i;
153
154	/*
155	 * XXX Remove this once window groups can be more fine-grainedly
156	 * enabled and disabled.
157	 */
158	for (i = 0; i < hub->soc->num_wgrps; i++) {
159		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
160
161		tegra_windowgroup_disable(wgrp);
162	}
163}
164
165static void tegra_shared_plane_update(struct tegra_plane *plane)
166{
167	struct tegra_dc *dc = plane->dc;
168	unsigned long timeout;
169	u32 mask, value;
170
171	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
172	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
173
174	timeout = jiffies + msecs_to_jiffies(1000);
175
176	while (time_before(jiffies, timeout)) {
177		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
178		if ((value & mask) == 0)
179			break;
180
181		usleep_range(100, 400);
182	}
183}
184
185static void tegra_shared_plane_activate(struct tegra_plane *plane)
186{
187	struct tegra_dc *dc = plane->dc;
188	unsigned long timeout;
189	u32 mask, value;
190
191	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
192	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
193
194	timeout = jiffies + msecs_to_jiffies(1000);
195
196	while (time_before(jiffies, timeout)) {
197		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
198		if ((value & mask) == 0)
199			break;
200
201		usleep_range(100, 400);
202	}
203}
204
205static unsigned int
206tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
207{
208	unsigned int offset =
209		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
210
211	return tegra_dc_readl(dc, offset) & OWNER_MASK;
212}
213
214static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
215				       struct tegra_plane *plane)
216{
217	struct device *dev = dc->dev;
218
219	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
220		if (plane->dc == dc)
221			return true;
222
223		dev_WARN(dev, "head %u owns window %u but is not attached\n",
224			 dc->pipe, plane->index);
225	}
226
227	return false;
228}
229
230static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
231					struct tegra_dc *new)
232{
233	unsigned int offset =
234		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
235	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
236	struct device *dev = new ? new->dev : old->dev;
237	unsigned int owner, index = plane->index;
238	u32 value;
239
240	value = tegra_dc_readl(dc, offset);
241	owner = value & OWNER_MASK;
242
243	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
244		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
245		return -EBUSY;
246	}
247
248	/*
249	 * This seems to happen whenever the head has been disabled with one
250	 * or more windows being active. This is harmless because we'll just
251	 * reassign the window to the new head anyway.
252	 */
253	if (old && owner == OWNER_MASK)
254		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
255			old->pipe, owner);
256
257	value &= ~OWNER_MASK;
258
259	if (new)
260		value |= OWNER(new->pipe);
261	else
262		value |= OWNER_MASK;
263
264	tegra_dc_writel(dc, value, offset);
265
266	plane->dc = new;
267
268	return 0;
269}
270
271static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
272					 struct tegra_plane *plane)
273{
274	u32 value;
275	int err;
276
277	if (!tegra_dc_owns_shared_plane(dc, plane)) {
278		err = tegra_shared_plane_set_owner(plane, dc);
279		if (err < 0)
280			return;
281	}
282
283	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
284	value |= MODE_FOUR_LINES;
285	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
286
287	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
288	value = SLOTS(1);
289	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
290
291	/* disable watermark */
292	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
293	value &= ~LATENCY_CTL_MODE_ENABLE;
294	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
295
296	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
297	value |= WATERMARK_MASK;
298	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
299
300	/* pipe meter */
301	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
302	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
303	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
304
305	/* mempool entries */
306	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
307	value = MEMPOOL_ENTRIES(0x331);
308	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
309
310	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
311	value &= ~THREAD_NUM_MASK;
312	value |= THREAD_NUM(plane->base.index);
313	value |= THREAD_GROUP_ENABLE;
314	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
315
316	tegra_shared_plane_update(plane);
317	tegra_shared_plane_activate(plane);
318}
319
320static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
321					 struct tegra_plane *plane)
322{
323	tegra_shared_plane_set_owner(plane, NULL);
324}
325
326static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
327					   struct drm_plane_state *state)
328{
329	struct tegra_plane_state *plane_state = to_tegra_plane_state(state);
330	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
331	struct tegra_bo_tiling *tiling = &plane_state->tiling;
332	struct tegra_dc *dc = to_tegra_dc(state->crtc);
333	int err;
334
335	/* no need for further checks if the plane is being disabled */
336	if (!state->crtc || !state->fb)
337		return 0;
338
339	err = tegra_plane_format(state->fb->format->format,
340				 &plane_state->format,
341				 &plane_state->swap);
342	if (err < 0)
343		return err;
344
345	err = tegra_fb_get_tiling(state->fb, tiling);
346	if (err < 0)
347		return err;
348
349	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
350	    !dc->soc->supports_block_linear) {
351		DRM_ERROR("hardware doesn't support block linear mode\n");
352		return -EINVAL;
353	}
354
355	/*
356	 * Tegra doesn't support different strides for U and V planes so we
357	 * error out if the user tries to display a framebuffer with such a
358	 * configuration.
359	 */
360	if (state->fb->format->num_planes > 2) {
361		if (state->fb->pitches[2] != state->fb->pitches[1]) {
362			DRM_ERROR("unsupported UV-plane configuration\n");
363			return -EINVAL;
364		}
365	}
366
367	/* XXX scaling is not yet supported, add a check here */
368
369	err = tegra_plane_state_add(&tegra->base, state);
370	if (err < 0)
371		return err;
372
373	return 0;
374}
375
376static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
377					      struct drm_plane_state *old_state)
378{
379	struct tegra_plane *p = to_tegra_plane(plane);
380	struct tegra_dc *dc;
381	u32 value;
382
383	/* rien ne va plus */
384	if (!old_state || !old_state->crtc)
385		return;
386
387	dc = to_tegra_dc(old_state->crtc);
388
389	/*
390	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
391	 * on planes that are already disabled. Make sure we fallback to the
392	 * head for this particular state instead of crashing.
393	 */
394	if (WARN_ON(p->dc == NULL))
395		p->dc = dc;
396
397	pm_runtime_get_sync(dc->dev);
398
399	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
400	value &= ~WIN_ENABLE;
401	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
402
403	tegra_dc_remove_shared_plane(dc, p);
404
405	pm_runtime_put(dc->dev);
406}
407
408static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
409					     struct drm_plane_state *old_state)
410{
411	struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
412	struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
413	unsigned int zpos = plane->state->normalized_zpos;
414	struct drm_framebuffer *fb = plane->state->fb;
415	struct tegra_plane *p = to_tegra_plane(plane);
416	struct tegra_bo *bo;
417	dma_addr_t base;
418	u32 value;
419
420	/* rien ne va plus */
421	if (!plane->state->crtc || !plane->state->fb)
422		return;
423
424	if (!plane->state->visible) {
425		tegra_shared_plane_atomic_disable(plane, old_state);
426		return;
427	}
428
429	pm_runtime_get_sync(dc->dev);
430
431	tegra_dc_assign_shared_plane(dc, p);
432
433	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
434
435	/* blending */
436	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
437		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
438		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
439	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
440
441	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
442		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
443		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
444	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
445
446	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
447	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
448
449	/* bypass scaling */
450	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
451	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
452
453	value = INPUT_SCALER_VBYPASS | INPUT_SCALER_HBYPASS;
454	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
455
456	/* disable compression */
457	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
458
459	bo = tegra_fb_get_plane(fb, 0);
460	base = bo->paddr;
461
462	tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
463	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
464
465	value = V_POSITION(plane->state->crtc_y) |
466		H_POSITION(plane->state->crtc_x);
467	tegra_plane_writel(p, value, DC_WIN_POSITION);
468
469	value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w);
470	tegra_plane_writel(p, value, DC_WIN_SIZE);
471
472	value = WIN_ENABLE | COLOR_EXPAND;
473	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
474
475	value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w);
476	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
477
478	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
479	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
480
481	value = PITCH(fb->pitches[0]);
482	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
483
484	value = CLAMP_BEFORE_BLEND | DEGAMMA_SRGB | INPUT_RANGE_FULL;
485	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
486
487	value = OFFSET_X(plane->state->src_y >> 16) |
488		OFFSET_Y(plane->state->src_x >> 16);
489	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
490
491	if (dc->soc->supports_block_linear) {
492		unsigned long height = state->tiling.value;
493
494		/* XXX */
495		switch (state->tiling.mode) {
496		case TEGRA_BO_TILING_MODE_PITCH:
497			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
498				DC_WINBUF_SURFACE_KIND_PITCH;
499			break;
500
501		/* XXX not supported on Tegra186 and later */
502		case TEGRA_BO_TILING_MODE_TILED:
503			value = DC_WINBUF_SURFACE_KIND_TILED;
504			break;
505
506		case TEGRA_BO_TILING_MODE_BLOCK:
507			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
508				DC_WINBUF_SURFACE_KIND_BLOCK;
509			break;
510		}
511
512		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
513	}
514
515	/* disable gamut CSC */
516	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
517	value &= ~CONTROL_CSC_ENABLE;
518	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
519
520	pm_runtime_put(dc->dev);
521}
522
523static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
524	.atomic_check = tegra_shared_plane_atomic_check,
525	.atomic_update = tegra_shared_plane_atomic_update,
526	.atomic_disable = tegra_shared_plane_atomic_disable,
527};
528
529struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
530					    struct tegra_dc *dc,
531					    unsigned int wgrp,
532					    unsigned int index)
533{
534	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
535	struct tegra_drm *tegra = drm->dev_private;
536	struct tegra_display_hub *hub = tegra->hub;
537	/* planes can be assigned to arbitrary CRTCs */
538	unsigned int possible_crtcs = 0x7;
539	struct tegra_shared_plane *plane;
540	unsigned int num_formats;
541	const u64 *modifiers;
542	struct drm_plane *p;
543	const u32 *formats;
544	int err;
545
546	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
547	if (!plane)
548		return ERR_PTR(-ENOMEM);
549
550	plane->base.offset = 0x0a00 + 0x0300 * index;
551	plane->base.index = index;
552
553	plane->wgrp = &hub->wgrps[wgrp];
554	plane->wgrp->parent = dc->dev;
555
556	p = &plane->base.base;
557
558	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
559	formats = tegra_shared_plane_formats;
560	modifiers = tegra_shared_plane_modifiers;
561
562	err = drm_universal_plane_init(drm, p, possible_crtcs,
563				       &tegra_plane_funcs, formats,
564				       num_formats, modifiers, type, NULL);
565	if (err < 0) {
566		kfree(plane);
567		return ERR_PTR(err);
568	}
569
570	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
571	drm_plane_create_zpos_property(p, 0, 0, 255);
572
573	return p;
574}
575
576static struct drm_private_state *
577tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
578{
579	struct tegra_display_hub_state *state;
580
581	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
582	if (!state)
583		return NULL;
584
585	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
586
587	return &state->base;
588}
589
590static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
591					    struct drm_private_state *state)
592{
593	struct tegra_display_hub_state *hub_state =
594		to_tegra_display_hub_state(state);
595
596	kfree(hub_state);
597}
598
599static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
600	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
601	.atomic_destroy_state = tegra_display_hub_destroy_state,
602};
603
604static struct tegra_display_hub_state *
605tegra_display_hub_get_state(struct tegra_display_hub *hub,
606			    struct drm_atomic_state *state)
607{
608	struct drm_device *drm = dev_get_drvdata(hub->client.parent);
609	struct drm_private_state *priv;
610
611	WARN_ON(!drm_modeset_is_locked(&drm->mode_config.connection_mutex));
612
613	priv = drm_atomic_get_private_obj_state(state, &hub->base);
614	if (IS_ERR(priv))
615		return ERR_CAST(priv);
616
617	return to_tegra_display_hub_state(priv);
618}
619
620int tegra_display_hub_atomic_check(struct drm_device *drm,
621				   struct drm_atomic_state *state)
622{
623	struct tegra_drm *tegra = drm->dev_private;
624	struct tegra_display_hub_state *hub_state;
625	struct drm_crtc_state *old, *new;
626	struct drm_crtc *crtc;
627	unsigned int i;
628
629	if (!tegra->hub)
630		return 0;
631
632	hub_state = tegra_display_hub_get_state(tegra->hub, state);
633	if (IS_ERR(hub_state))
634		return PTR_ERR(hub_state);
635
636	/*
637	 * The display hub display clock needs to be fed by the display clock
638	 * with the highest frequency to ensure proper functioning of all the
639	 * displays.
640	 *
641	 * Note that this isn't used before Tegra186, but it doesn't hurt and
642	 * conditionalizing it would make the code less clean.
643	 */
644	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
645		struct tegra_dc_state *dc = to_dc_state(new);
646
647		if (new->active) {
648			if (!hub_state->clk || dc->pclk > hub_state->rate) {
649				hub_state->dc = to_tegra_dc(dc->base.crtc);
650				hub_state->clk = hub_state->dc->clk;
651				hub_state->rate = dc->pclk;
652			}
653		}
654	}
655
656	return 0;
657}
658
659static void tegra_display_hub_update(struct tegra_dc *dc)
660{
661	u32 value;
662
663	pm_runtime_get_sync(dc->dev);
664
665	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
666	value &= ~LATENCY_EVENT;
667	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
668
669	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
670	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
671	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
672
673	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
674	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
675	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
676	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
677
678	pm_runtime_put(dc->dev);
679}
680
681void tegra_display_hub_atomic_commit(struct drm_device *drm,
682				     struct drm_atomic_state *state)
683{
684	struct tegra_drm *tegra = drm->dev_private;
685	struct tegra_display_hub *hub = tegra->hub;
686	struct tegra_display_hub_state *hub_state;
687	struct device *dev = hub->client.dev;
688	int err;
689
690	hub_state = to_tegra_display_hub_state(hub->base.state);
691
692	if (hub_state->clk) {
693		err = clk_set_rate(hub_state->clk, hub_state->rate);
694		if (err < 0)
695			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
696				hub_state->clk, hub_state->rate);
697
698		err = clk_set_parent(hub->clk_disp, hub_state->clk);
699		if (err < 0)
700			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
701				hub->clk_disp, hub_state->clk, err);
702	}
703
704	if (hub_state->dc)
705		tegra_display_hub_update(hub_state->dc);
706}
707
708static int tegra_display_hub_init(struct host1x_client *client)
709{
710	struct tegra_display_hub *hub = to_tegra_display_hub(client);
711	struct drm_device *drm = dev_get_drvdata(client->parent);
712	struct tegra_drm *tegra = drm->dev_private;
713	struct tegra_display_hub_state *state;
714
715	state = kzalloc(sizeof(*state), GFP_KERNEL);
716	if (!state)
717		return -ENOMEM;
718
719	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
720				    &tegra_display_hub_state_funcs);
721
722	tegra->hub = hub;
723
724	return 0;
725}
726
727static int tegra_display_hub_exit(struct host1x_client *client)
728{
729	struct drm_device *drm = dev_get_drvdata(client->parent);
730	struct tegra_drm *tegra = drm->dev_private;
731
732	drm_atomic_private_obj_fini(&tegra->hub->base);
733	tegra->hub = NULL;
734
735	return 0;
736}
737
738static const struct host1x_client_ops tegra_display_hub_ops = {
739	.init = tegra_display_hub_init,
740	.exit = tegra_display_hub_exit,
741};
742
743static int tegra_display_hub_probe(struct platform_device *pdev)
744{
745	struct device_node *child = NULL;
746	struct tegra_display_hub *hub;
747	struct clk *clk;
748	unsigned int i;
749	int err;
750
751	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
752	if (!hub)
753		return -ENOMEM;
754
755	hub->soc = of_device_get_match_data(&pdev->dev);
756
757	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
758	if (IS_ERR(hub->clk_disp)) {
759		err = PTR_ERR(hub->clk_disp);
760		return err;
761	}
762
763	if (hub->soc->supports_dsc) {
764		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
765		if (IS_ERR(hub->clk_dsc)) {
766			err = PTR_ERR(hub->clk_dsc);
767			return err;
768		}
769	}
770
771	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
772	if (IS_ERR(hub->clk_hub)) {
773		err = PTR_ERR(hub->clk_hub);
774		return err;
775	}
776
777	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
778	if (IS_ERR(hub->rst)) {
779		err = PTR_ERR(hub->rst);
780		return err;
781	}
782
783	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
784				  sizeof(*hub->wgrps), GFP_KERNEL);
785	if (!hub->wgrps)
786		return -ENOMEM;
787
788	for (i = 0; i < hub->soc->num_wgrps; i++) {
789		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
790		char id[8];
791
792		snprintf(id, sizeof(id), "wgrp%u", i);
793		mutex_init(&wgrp->lock);
794		wgrp->usecount = 0;
795		wgrp->index = i;
796
797		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
798		if (IS_ERR(wgrp->rst))
799			return PTR_ERR(wgrp->rst);
800
801		err = reset_control_assert(wgrp->rst);
802		if (err < 0)
803			return err;
804	}
805
806	hub->num_heads = of_get_child_count(pdev->dev.of_node);
807
808	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
809				      GFP_KERNEL);
810	if (!hub->clk_heads)
811		return -ENOMEM;
812
813	for (i = 0; i < hub->num_heads; i++) {
814		child = of_get_next_child(pdev->dev.of_node, child);
815		if (!child) {
816			dev_err(&pdev->dev, "failed to find node for head %u\n",
817				i);
818			return -ENODEV;
819		}
820
821		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
822		if (IS_ERR(clk)) {
823			dev_err(&pdev->dev, "failed to get clock for head %u\n",
824				i);
825			of_node_put(child);
826			return PTR_ERR(clk);
827		}
828
829		hub->clk_heads[i] = clk;
830	}
831
832	of_node_put(child);
833
834	/* XXX: enable clock across reset? */
835	err = reset_control_assert(hub->rst);
836	if (err < 0)
837		return err;
838
839	platform_set_drvdata(pdev, hub);
840	pm_runtime_enable(&pdev->dev);
841
842	INIT_LIST_HEAD(&hub->client.list);
843	hub->client.ops = &tegra_display_hub_ops;
844	hub->client.dev = &pdev->dev;
845
846	err = host1x_client_register(&hub->client);
847	if (err < 0)
848		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
849			err);
850
851	return err;
852}
853
854static int tegra_display_hub_remove(struct platform_device *pdev)
855{
856	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
857	int err;
858
859	err = host1x_client_unregister(&hub->client);
860	if (err < 0) {
861		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
862			err);
863	}
864
865	pm_runtime_disable(&pdev->dev);
866
867	return err;
868}
869
870static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
871{
872	struct tegra_display_hub *hub = dev_get_drvdata(dev);
873	unsigned int i = hub->num_heads;
874	int err;
875
876	err = reset_control_assert(hub->rst);
877	if (err < 0)
878		return err;
879
880	while (i--)
881		clk_disable_unprepare(hub->clk_heads[i]);
882
883	clk_disable_unprepare(hub->clk_hub);
884	clk_disable_unprepare(hub->clk_dsc);
885	clk_disable_unprepare(hub->clk_disp);
886
887	return 0;
888}
889
890static int __maybe_unused tegra_display_hub_resume(struct device *dev)
891{
892	struct tegra_display_hub *hub = dev_get_drvdata(dev);
893	unsigned int i;
894	int err;
895
896	err = clk_prepare_enable(hub->clk_disp);
897	if (err < 0)
898		return err;
899
900	err = clk_prepare_enable(hub->clk_dsc);
901	if (err < 0)
902		goto disable_disp;
903
904	err = clk_prepare_enable(hub->clk_hub);
905	if (err < 0)
906		goto disable_dsc;
907
908	for (i = 0; i < hub->num_heads; i++) {
909		err = clk_prepare_enable(hub->clk_heads[i]);
910		if (err < 0)
911			goto disable_heads;
912	}
913
914	err = reset_control_deassert(hub->rst);
915	if (err < 0)
916		goto disable_heads;
917
918	return 0;
919
920disable_heads:
921	while (i--)
922		clk_disable_unprepare(hub->clk_heads[i]);
923
924	clk_disable_unprepare(hub->clk_hub);
925disable_dsc:
926	clk_disable_unprepare(hub->clk_dsc);
927disable_disp:
928	clk_disable_unprepare(hub->clk_disp);
929	return err;
930}
931
932static const struct dev_pm_ops tegra_display_hub_pm_ops = {
933	SET_RUNTIME_PM_OPS(tegra_display_hub_suspend,
934			   tegra_display_hub_resume, NULL)
935};
936
937static const struct tegra_display_hub_soc tegra186_display_hub = {
938	.num_wgrps = 6,
939	.supports_dsc = true,
940};
941
942static const struct tegra_display_hub_soc tegra194_display_hub = {
943	.num_wgrps = 6,
944	.supports_dsc = false,
945};
946
947static const struct of_device_id tegra_display_hub_of_match[] = {
948	{
949		.compatible = "nvidia,tegra194-display",
950		.data = &tegra194_display_hub
951	}, {
952		.compatible = "nvidia,tegra186-display",
953		.data = &tegra186_display_hub
954	}, {
955		/* sentinel */
956	}
957};
958MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
959
960struct platform_driver tegra_display_hub_driver = {
961	.driver = {
962		.name = "tegra-display-hub",
963		.of_match_table = tegra_display_hub_of_match,
964		.pm = &tegra_display_hub_pm_ops,
965	},
966	.probe = tegra_display_hub_probe,
967	.remove = tegra_display_hub_remove,
968};
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
   4 */
   5
   6#include <linux/clk.h>
   7#include <linux/delay.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/host1x.h>
  10#include <linux/module.h>
  11#include <linux/of.h>
  12#include <linux/of_graph.h>
  13#include <linux/of_platform.h>
  14#include <linux/platform_device.h>
  15#include <linux/pm_runtime.h>
  16#include <linux/reset.h>
  17
  18#include <drm/drm_atomic.h>
  19#include <drm/drm_atomic_helper.h>
  20#include <drm/drm_blend.h>
  21#include <drm/drm_fourcc.h>
  22#include <drm/drm_framebuffer.h>
  23#include <drm/drm_probe_helper.h>
  24
  25#include "drm.h"
  26#include "dc.h"
  27#include "plane.h"
  28
  29#define NFB 24
  30
  31static const u32 tegra_shared_plane_formats[] = {
  32	DRM_FORMAT_ARGB1555,
  33	DRM_FORMAT_RGB565,
  34	DRM_FORMAT_RGBA5551,
  35	DRM_FORMAT_ARGB8888,
  36	DRM_FORMAT_ABGR8888,
  37	/* new on Tegra114 */
  38	DRM_FORMAT_ABGR4444,
  39	DRM_FORMAT_ABGR1555,
  40	DRM_FORMAT_BGRA5551,
  41	DRM_FORMAT_XRGB1555,
  42	DRM_FORMAT_RGBX5551,
  43	DRM_FORMAT_XBGR1555,
  44	DRM_FORMAT_BGRX5551,
  45	DRM_FORMAT_BGR565,
  46	DRM_FORMAT_XRGB8888,
  47	DRM_FORMAT_XBGR8888,
  48	/* planar formats */
  49	DRM_FORMAT_UYVY,
  50	DRM_FORMAT_YUYV,
  51	DRM_FORMAT_YUV420,
  52	DRM_FORMAT_YUV422,
  53};
  54
  55static const u64 tegra_shared_plane_modifiers[] = {
  56	DRM_FORMAT_MOD_LINEAR,
  57	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
  58	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
  59	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
  60	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
  61	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
  62	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
  63	/*
  64	 * The GPU sector layout is only supported on Tegra194, but these will
  65	 * be filtered out later on by ->format_mod_supported() on SoCs where
  66	 * it isn't supported.
  67	 */
  68	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  69	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  70	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  71	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  72	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  73	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
  74	/* sentinel */
  75	DRM_FORMAT_MOD_INVALID
  76};
  77
  78static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
  79					      unsigned int offset)
  80{
  81	if (offset >= 0x500 && offset <= 0x581) {
  82		offset = 0x000 + (offset - 0x500);
  83		return plane->offset + offset;
  84	}
  85
  86	if (offset >= 0x700 && offset <= 0x73c) {
  87		offset = 0x180 + (offset - 0x700);
  88		return plane->offset + offset;
  89	}
  90
  91	if (offset >= 0x800 && offset <= 0x83e) {
  92		offset = 0x1c0 + (offset - 0x800);
  93		return plane->offset + offset;
  94	}
  95
  96	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
  97
  98	return plane->offset + offset;
  99}
 100
 101static inline u32 tegra_plane_readl(struct tegra_plane *plane,
 102				    unsigned int offset)
 103{
 104	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
 105}
 106
 107static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
 108				      unsigned int offset)
 109{
 110	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
 111}
 112
 113static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
 114{
 115	int err = 0;
 116
 117	mutex_lock(&wgrp->lock);
 118
 119	if (wgrp->usecount == 0) {
 120		err = host1x_client_resume(wgrp->parent);
 121		if (err < 0) {
 122			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
 123			goto unlock;
 124		}
 125
 126		reset_control_deassert(wgrp->rst);
 127	}
 128
 129	wgrp->usecount++;
 130
 131unlock:
 132	mutex_unlock(&wgrp->lock);
 133	return err;
 134}
 135
 136static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
 137{
 138	int err;
 139
 140	mutex_lock(&wgrp->lock);
 141
 142	if (wgrp->usecount == 1) {
 143		err = reset_control_assert(wgrp->rst);
 144		if (err < 0) {
 145			pr_err("failed to assert reset for window group %u\n",
 146			       wgrp->index);
 147		}
 148
 149		host1x_client_suspend(wgrp->parent);
 150	}
 151
 152	wgrp->usecount--;
 153	mutex_unlock(&wgrp->lock);
 154}
 155
 156int tegra_display_hub_prepare(struct tegra_display_hub *hub)
 157{
 158	unsigned int i;
 159
 160	/*
 161	 * XXX Enabling/disabling windowgroups needs to happen when the owner
 162	 * display controller is disabled. There's currently no good point at
 163	 * which this could be executed, so unconditionally enable all window
 164	 * groups for now.
 165	 */
 166	for (i = 0; i < hub->soc->num_wgrps; i++) {
 167		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 168
 169		/* Skip orphaned window group whose parent DC is disabled */
 170		if (wgrp->parent)
 171			tegra_windowgroup_enable(wgrp);
 172	}
 173
 174	return 0;
 175}
 176
 177void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
 178{
 179	unsigned int i;
 180
 181	/*
 182	 * XXX Remove this once window groups can be more fine-grainedly
 183	 * enabled and disabled.
 184	 */
 185	for (i = 0; i < hub->soc->num_wgrps; i++) {
 186		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 187
 188		/* Skip orphaned window group whose parent DC is disabled */
 189		if (wgrp->parent)
 190			tegra_windowgroup_disable(wgrp);
 191	}
 192}
 193
 194static void tegra_shared_plane_update(struct tegra_plane *plane)
 195{
 196	struct tegra_dc *dc = plane->dc;
 197	unsigned long timeout;
 198	u32 mask, value;
 199
 200	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
 201	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
 202
 203	timeout = jiffies + msecs_to_jiffies(1000);
 204
 205	while (time_before(jiffies, timeout)) {
 206		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 207		if ((value & mask) == 0)
 208			break;
 209
 210		usleep_range(100, 400);
 211	}
 212}
 213
 214static void tegra_shared_plane_activate(struct tegra_plane *plane)
 215{
 216	struct tegra_dc *dc = plane->dc;
 217	unsigned long timeout;
 218	u32 mask, value;
 219
 220	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
 221	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
 222
 223	timeout = jiffies + msecs_to_jiffies(1000);
 224
 225	while (time_before(jiffies, timeout)) {
 226		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 227		if ((value & mask) == 0)
 228			break;
 229
 230		usleep_range(100, 400);
 231	}
 232}
 233
 234static unsigned int
 235tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
 236{
 237	unsigned int offset =
 238		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
 239
 240	return tegra_dc_readl(dc, offset) & OWNER_MASK;
 241}
 242
 243static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
 244				       struct tegra_plane *plane)
 245{
 246	struct device *dev = dc->dev;
 247
 248	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
 249		if (plane->dc == dc)
 250			return true;
 251
 252		dev_WARN(dev, "head %u owns window %u but is not attached\n",
 253			 dc->pipe, plane->index);
 254	}
 255
 256	return false;
 257}
 258
 259static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
 260					struct tegra_dc *new)
 261{
 262	unsigned int offset =
 263		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
 264	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
 265	struct device *dev = new ? new->dev : old->dev;
 266	unsigned int owner, index = plane->index;
 267	u32 value;
 268
 269	value = tegra_dc_readl(dc, offset);
 270	owner = value & OWNER_MASK;
 271
 272	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
 273		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
 274		return -EBUSY;
 275	}
 276
 277	/*
 278	 * This seems to happen whenever the head has been disabled with one
 279	 * or more windows being active. This is harmless because we'll just
 280	 * reassign the window to the new head anyway.
 281	 */
 282	if (old && owner == OWNER_MASK)
 283		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
 284			old->pipe, owner);
 285
 286	value &= ~OWNER_MASK;
 287
 288	if (new)
 289		value |= OWNER(new->pipe);
 290	else
 291		value |= OWNER_MASK;
 292
 293	tegra_dc_writel(dc, value, offset);
 294
 295	plane->dc = new;
 296
 297	return 0;
 298}
 299
 300static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
 301{
 302	static const unsigned int coeffs[192] = {
 303		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
 304		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
 305		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
 306		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
 307		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
 308		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
 309		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
 310		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
 311		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
 312		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
 313		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
 314		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
 315		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
 316		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
 317		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
 318		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
 319		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
 320		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
 321		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
 322		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
 323		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
 324		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
 325		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
 326		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
 327		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
 328		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
 329		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
 330		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
 331		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
 332		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
 333		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
 334		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
 335		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
 336		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
 337		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
 338		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
 339		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
 340		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
 341		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
 342		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
 343		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
 344		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
 345		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
 346		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
 347		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
 348		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
 349		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
 350		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
 351	};
 352	unsigned int ratio, row, column;
 353
 354	for (ratio = 0; ratio <= 2; ratio++) {
 355		for (row = 0; row <= 15; row++) {
 356			for (column = 0; column <= 3; column++) {
 357				unsigned int index = (ratio << 6) + (row << 2) + column;
 358				u32 value;
 359
 360				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
 361				tegra_plane_writel(plane, value,
 362						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
 363			}
 364		}
 365	}
 366}
 367
 368static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
 369					 struct tegra_plane *plane)
 370{
 371	u32 value;
 372	int err;
 373
 374	if (!tegra_dc_owns_shared_plane(dc, plane)) {
 375		err = tegra_shared_plane_set_owner(plane, dc);
 376		if (err < 0)
 377			return;
 378	}
 379
 380	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
 381	value |= MODE_FOUR_LINES;
 382	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
 383
 384	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
 385	value = SLOTS(1);
 386	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
 387
 388	/* disable watermark */
 389	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
 390	value &= ~LATENCY_CTL_MODE_ENABLE;
 391	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
 392
 393	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
 394	value |= WATERMARK_MASK;
 395	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
 396
 397	/* pipe meter */
 398	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
 399	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
 400	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
 401
 402	/* mempool entries */
 403	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
 404	value = MEMPOOL_ENTRIES(0x331);
 405	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
 406
 407	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
 408	value &= ~THREAD_NUM_MASK;
 409	value |= THREAD_NUM(plane->base.index);
 410	value |= THREAD_GROUP_ENABLE;
 411	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
 412
 413	tegra_shared_plane_setup_scaler(plane);
 414
 415	tegra_shared_plane_update(plane);
 416	tegra_shared_plane_activate(plane);
 417}
 418
 419static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
 420					 struct tegra_plane *plane)
 421{
 422	tegra_shared_plane_set_owner(plane, NULL);
 423}
 424
 425static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 426					   struct drm_atomic_state *state)
 427{
 428	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 429										 plane);
 430	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
 431	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
 432	struct tegra_bo_tiling *tiling = &plane_state->tiling;
 433	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
 434	int err;
 435
 436	/* no need for further checks if the plane is being disabled */
 437	if (!new_plane_state->crtc || !new_plane_state->fb)
 438		return 0;
 439
 440	err = tegra_plane_format(new_plane_state->fb->format->format,
 441				 &plane_state->format,
 442				 &plane_state->swap);
 443	if (err < 0)
 444		return err;
 445
 446	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
 447	if (err < 0)
 448		return err;
 449
 450	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
 451	    !dc->soc->supports_block_linear) {
 452		DRM_ERROR("hardware doesn't support block linear mode\n");
 453		return -EINVAL;
 454	}
 455
 456	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
 457	    !dc->soc->supports_sector_layout) {
 458		DRM_ERROR("hardware doesn't support GPU sector layout\n");
 459		return -EINVAL;
 460	}
 461
 462	/*
 463	 * Tegra doesn't support different strides for U and V planes so we
 464	 * error out if the user tries to display a framebuffer with such a
 465	 * configuration.
 466	 */
 467	if (new_plane_state->fb->format->num_planes > 2) {
 468		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
 469			DRM_ERROR("unsupported UV-plane configuration\n");
 470			return -EINVAL;
 471		}
 472	}
 473
 474	/* XXX scaling is not yet supported, add a check here */
 475
 476	err = tegra_plane_state_add(&tegra->base, new_plane_state);
 477	if (err < 0)
 478		return err;
 479
 480	return 0;
 481}
 482
 483static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 484					      struct drm_atomic_state *state)
 485{
 486	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 487									   plane);
 488	struct tegra_plane *p = to_tegra_plane(plane);
 489	struct tegra_dc *dc;
 490	u32 value;
 491	int err;
 492
 493	/* rien ne va plus */
 494	if (!old_state || !old_state->crtc)
 495		return;
 496
 497	dc = to_tegra_dc(old_state->crtc);
 498
 499	err = host1x_client_resume(&dc->client);
 500	if (err < 0) {
 501		dev_err(dc->dev, "failed to resume: %d\n", err);
 502		return;
 503	}
 504
 505	/*
 506	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
 507	 * on planes that are already disabled. Make sure we fallback to the
 508	 * head for this particular state instead of crashing.
 509	 */
 510	if (WARN_ON(p->dc == NULL))
 511		p->dc = dc;
 512
 513	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
 514	value &= ~WIN_ENABLE;
 515	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
 516
 517	tegra_dc_remove_shared_plane(dc, p);
 518
 519	host1x_client_suspend(&dc->client);
 520}
 521
 522static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
 523{
 524	u64 tmp, tmp1;
 525
 526	tmp = (u64)dfixed_trunc(in);
 527	tmp1 = (tmp << NFB) + ((u64)out >> 1);
 528	do_div(tmp1, out);
 529
 530	return lower_32_bits(tmp1);
 531}
 532
 533static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
 534					     struct drm_atomic_state *state)
 535{
 536	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 537									   plane);
 538	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
 539	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
 540	unsigned int zpos = new_state->normalized_zpos;
 541	struct drm_framebuffer *fb = new_state->fb;
 542	struct tegra_plane *p = to_tegra_plane(plane);
 543	u32 value, min_width, bypass = 0;
 544	dma_addr_t base, addr_flag = 0;
 545	unsigned int bpc, planes;
 546	bool yuv;
 547	int err;
 548
 549	/* rien ne va plus */
 550	if (!new_state->crtc || !new_state->fb)
 551		return;
 552
 553	if (!new_state->visible) {
 554		tegra_shared_plane_atomic_disable(plane, state);
 555		return;
 556	}
 557
 558	err = host1x_client_resume(&dc->client);
 559	if (err < 0) {
 560		dev_err(dc->dev, "failed to resume: %d\n", err);
 561		return;
 562	}
 563
 564	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);
 565
 566	tegra_dc_assign_shared_plane(dc, p);
 567
 568	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
 569
 570	/* blending */
 571	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
 572		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
 573		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
 574	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
 575
 576	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
 577		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
 578		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
 579	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
 580
 581	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
 582	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
 583
 584	/* scaling */
 585	min_width = min(new_state->src_w >> 16, new_state->crtc_w);
 586
 587	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);
 588
 589	if (min_width < MAX_PIXELS_5TAP444(value)) {
 590		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
 591	} else {
 592		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);
 593
 594		if (min_width < MAX_PIXELS_2TAP444(value))
 595			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
 596		else
 597			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
 598	}
 599
 600	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
 601	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
 602
 603	if (new_state->src_w != new_state->crtc_w << 16) {
 604		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
 605		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
 606		u32 init = (1 << (NFB - 1)) + (incr >> 1);
 607
 608		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
 609		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
 610	} else {
 611		bypass |= INPUT_SCALER_HBYPASS;
 612	}
 613
 614	if (new_state->src_h != new_state->crtc_h << 16) {
 615		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
 616		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
 617		u32 init = (1 << (NFB - 1)) + (incr >> 1);
 618
 619		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
 620		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
 621	} else {
 622		bypass |= INPUT_SCALER_VBYPASS;
 623	}
 624
 625	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
 626
 627	/* disable compression */
 628	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
 629
 630#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 631	/*
 632	 * Physical address bit 39 in Tegra194 is used as a switch for special
 633	 * logic that swizzles the memory using either the legacy Tegra or the
 634	 * dGPU sector layout.
 635	 */
 636	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
 637		addr_flag = BIT_ULL(39);
 638#endif
 639
 640	base = tegra_plane_state->iova[0] + fb->offsets[0];
 641	base |= addr_flag;
 642
 643	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
 644	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
 645
 646	value = V_POSITION(new_state->crtc_y) |
 647		H_POSITION(new_state->crtc_x);
 648	tegra_plane_writel(p, value, DC_WIN_POSITION);
 649
 650	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
 651	tegra_plane_writel(p, value, DC_WIN_SIZE);
 652
 653	value = WIN_ENABLE | COLOR_EXPAND;
 654	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
 655
 656	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
 657	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
 658
 659	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
 660	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
 661
 662	value = PITCH(fb->pitches[0]);
 663	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
 664
 665	if (yuv && planes > 1) {
 666		base = tegra_plane_state->iova[1] + fb->offsets[1];
 667		base |= addr_flag;
 668
 669		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
 670		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);
 671
 672		if (planes > 2) {
 673			base = tegra_plane_state->iova[2] + fb->offsets[2];
 674			base |= addr_flag;
 675
 676			tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
 677			tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
 678		}
 679
 680		value = PITCH_U(fb->pitches[1]);
 681
 682		if (planes > 2)
 683			value |= PITCH_V(fb->pitches[2]);
 684
 685		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
 686	} else {
 687		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
 688		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
 689		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
 690		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
 691		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
 692	}
 693
 694	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;
 695
 696	if (yuv) {
 697		if (bpc < 12)
 698			value |= DEGAMMA_YUV8_10;
 699		else
 700			value |= DEGAMMA_YUV12;
 701
 702		/* XXX parameterize */
 703		value |= COLOR_SPACE_YUV_2020;
 704	} else {
 705		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
 706			value |= DEGAMMA_SRGB;
 707	}
 708
 709	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
 710
 711	value = OFFSET_X(new_state->src_y >> 16) |
 712		OFFSET_Y(new_state->src_x >> 16);
 713	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
 714
 715	if (dc->soc->supports_block_linear) {
 716		unsigned long height = tegra_plane_state->tiling.value;
 717
 718		/* XXX */
 719		switch (tegra_plane_state->tiling.mode) {
 720		case TEGRA_BO_TILING_MODE_PITCH:
 721			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
 722				DC_WINBUF_SURFACE_KIND_PITCH;
 723			break;
 724
 725		/* XXX not supported on Tegra186 and later */
 726		case TEGRA_BO_TILING_MODE_TILED:
 727			value = DC_WINBUF_SURFACE_KIND_TILED;
 728			break;
 729
 730		case TEGRA_BO_TILING_MODE_BLOCK:
 731			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
 732				DC_WINBUF_SURFACE_KIND_BLOCK;
 733			break;
 734		}
 735
 736		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
 737	}
 738
 739	/* disable gamut CSC */
 740	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
 741	value &= ~CONTROL_CSC_ENABLE;
 742	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
 743
 744	host1x_client_suspend(&dc->client);
 745}
 746
 747static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
 748	.prepare_fb = tegra_plane_prepare_fb,
 749	.cleanup_fb = tegra_plane_cleanup_fb,
 750	.atomic_check = tegra_shared_plane_atomic_check,
 751	.atomic_update = tegra_shared_plane_atomic_update,
 752	.atomic_disable = tegra_shared_plane_atomic_disable,
 753};
 754
 755struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
 756					    struct tegra_dc *dc,
 757					    unsigned int wgrp,
 758					    unsigned int index)
 759{
 760	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
 761	struct tegra_drm *tegra = drm->dev_private;
 762	struct tegra_display_hub *hub = tegra->hub;
 763	struct tegra_shared_plane *plane;
 764	unsigned int possible_crtcs;
 765	unsigned int num_formats;
 766	const u64 *modifiers;
 767	struct drm_plane *p;
 768	const u32 *formats;
 769	int err;
 770
 771	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
 772	if (!plane)
 773		return ERR_PTR(-ENOMEM);
 774
 775	plane->base.offset = 0x0a00 + 0x0300 * index;
 776	plane->base.index = index;
 777
 778	plane->wgrp = &hub->wgrps[wgrp];
 779	plane->wgrp->parent = &dc->client;
 780
 781	p = &plane->base.base;
 782
 783	/* planes can be assigned to arbitrary CRTCs */
 784	possible_crtcs = BIT(tegra->num_crtcs) - 1;
 785
 786	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
 787	formats = tegra_shared_plane_formats;
 788	modifiers = tegra_shared_plane_modifiers;
 789
 790	err = drm_universal_plane_init(drm, p, possible_crtcs,
 791				       &tegra_plane_funcs, formats,
 792				       num_formats, modifiers, type, NULL);
 793	if (err < 0) {
 794		kfree(plane);
 795		return ERR_PTR(err);
 796	}
 797
 798	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
 799	drm_plane_create_zpos_property(p, 0, 0, 255);
 800
 801	return p;
 802}
 803
 804static struct drm_private_state *
 805tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
 806{
 807	struct tegra_display_hub_state *state;
 808
 809	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
 810	if (!state)
 811		return NULL;
 812
 813	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
 814
 815	return &state->base;
 816}
 817
 818static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
 819					    struct drm_private_state *state)
 820{
 821	struct tegra_display_hub_state *hub_state =
 822		to_tegra_display_hub_state(state);
 823
 824	kfree(hub_state);
 825}
 826
 827static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
 828	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
 829	.atomic_destroy_state = tegra_display_hub_destroy_state,
 830};
 831
 832static struct tegra_display_hub_state *
 833tegra_display_hub_get_state(struct tegra_display_hub *hub,
 834			    struct drm_atomic_state *state)
 835{
 836	struct drm_private_state *priv;
 837
 838	priv = drm_atomic_get_private_obj_state(state, &hub->base);
 839	if (IS_ERR(priv))
 840		return ERR_CAST(priv);
 841
 842	return to_tegra_display_hub_state(priv);
 843}
 844
 845int tegra_display_hub_atomic_check(struct drm_device *drm,
 846				   struct drm_atomic_state *state)
 847{
 848	struct tegra_drm *tegra = drm->dev_private;
 849	struct tegra_display_hub_state *hub_state;
 850	struct drm_crtc_state *old, *new;
 851	struct drm_crtc *crtc;
 852	unsigned int i;
 853
 854	if (!tegra->hub)
 855		return 0;
 856
 857	hub_state = tegra_display_hub_get_state(tegra->hub, state);
 858	if (IS_ERR(hub_state))
 859		return PTR_ERR(hub_state);
 860
 861	/*
 862	 * The display hub display clock needs to be fed by the display clock
 863	 * with the highest frequency to ensure proper functioning of all the
 864	 * displays.
 865	 *
 866	 * Note that this isn't used before Tegra186, but it doesn't hurt and
 867	 * conditionalizing it would make the code less clean.
 868	 */
 869	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
 870		struct tegra_dc_state *dc = to_dc_state(new);
 871
 872		if (new->active) {
 873			if (!hub_state->clk || dc->pclk > hub_state->rate) {
 874				hub_state->dc = to_tegra_dc(dc->base.crtc);
 875				hub_state->clk = hub_state->dc->clk;
 876				hub_state->rate = dc->pclk;
 877			}
 878		}
 879	}
 880
 881	return 0;
 882}
 883
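/*
 * Program the common IHUB settings of a display controller (clear the
 * latency event control and set the display fetch meter to one cursor and
 * one window group slot) and latch them via the COMMON_UPDATE/COMMON_ACTREQ
 * sequence in DC_CMD_STATE_CONTROL.
 */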
 884static void tegra_display_hub_update(struct tegra_dc *dc)
 885{
 886	u32 value;
 887	int err;
 888
 889	err = host1x_client_resume(&dc->client);
 890	if (err < 0) {
 891		dev_err(dc->dev, "failed to resume: %d\n", err);
 892		return;
 893	}
 894
 895	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
 896	value &= ~LATENCY_EVENT;
 897	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
 898
 899	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
 900	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
 901	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
 902
 903	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
 904	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 905	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
 906	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 907
 908	host1x_client_suspend(&dc->client);
 909}
 910
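/*
 * Apply the hub state selected during atomic check: run the chosen display
 * clock at the highest pixel clock rate, reparent the hub display clock to
 * it and trigger a common update on the corresponding head.
 */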
 911void tegra_display_hub_atomic_commit(struct drm_device *drm,
 912				     struct drm_atomic_state *state)
 913{
 914	struct tegra_drm *tegra = drm->dev_private;
 915	struct tegra_display_hub *hub = tegra->hub;
 916	struct tegra_display_hub_state *hub_state;
 917	struct device *dev = hub->client.dev;
 918	int err;
 919
 920	hub_state = to_tegra_display_hub_state(hub->base.state);
 921
 922	if (hub_state->clk) {
 923		err = clk_set_rate(hub_state->clk, hub_state->rate);
 924		if (err < 0)
 925			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
 926				hub_state->clk, hub_state->rate);
 927
 928		err = clk_set_parent(hub->clk_disp, hub_state->clk);
 929		if (err < 0)
 930			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
 931				hub->clk_disp, hub_state->clk, err);
 932	}
 933
 934	if (hub_state->dc)
 935		tegra_display_hub_update(hub_state->dc);
 936}
 937
 938static int tegra_display_hub_init(struct host1x_client *client)
 939{
 940	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 941	struct drm_device *drm = dev_get_drvdata(client->host);
 942	struct tegra_drm *tegra = drm->dev_private;
 943	struct tegra_display_hub_state *state;
 944
 945	state = kzalloc(sizeof(*state), GFP_KERNEL);
 946	if (!state)
 947		return -ENOMEM;
 948
 949	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
 950				    &tegra_display_hub_state_funcs);
 951
 952	tegra->hub = hub;
 953
 954	return 0;
 955}
 956
 957static int tegra_display_hub_exit(struct host1x_client *client)
 958{
 959	struct drm_device *drm = dev_get_drvdata(client->host);
 960	struct tegra_drm *tegra = drm->dev_private;
 961
 962	drm_atomic_private_obj_fini(&tegra->hub->base);
 963	tegra->hub = NULL;
 964
 965	return 0;
 966}
 967
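/*
 * Runtime PM: suspend asserts the hub reset and gates the per-head, hub,
 * DSC and display clocks; resume re-enables them in the opposite order and
 * releases the reset.
 */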
 968static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
 969{
 970	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 971	struct device *dev = client->dev;
 972	unsigned int i = hub->num_heads;
 973	int err;
 974
 975	err = reset_control_assert(hub->rst);
 976	if (err < 0)
 977		return err;
 978
 979	while (i--)
 980		clk_disable_unprepare(hub->clk_heads[i]);
 981
 982	clk_disable_unprepare(hub->clk_hub);
 983	clk_disable_unprepare(hub->clk_dsc);
 984	clk_disable_unprepare(hub->clk_disp);
 985
 986	pm_runtime_put_sync(dev);
 987
 988	return 0;
 989}
 990
 991static int tegra_display_hub_runtime_resume(struct host1x_client *client)
 992{
 993	struct tegra_display_hub *hub = to_tegra_display_hub(client);
 994	struct device *dev = client->dev;
 995	unsigned int i;
 996	int err;
 997
 998	err = pm_runtime_resume_and_get(dev);
 999	if (err < 0) {
1000		dev_err(dev, "failed to get runtime PM: %d\n", err);
1001		return err;
1002	}
1003
1004	err = clk_prepare_enable(hub->clk_disp);
1005	if (err < 0)
1006		goto put_rpm;
1007
1008	err = clk_prepare_enable(hub->clk_dsc);
1009	if (err < 0)
1010		goto disable_disp;
1011
1012	err = clk_prepare_enable(hub->clk_hub);
1013	if (err < 0)
1014		goto disable_dsc;
1015
1016	for (i = 0; i < hub->num_heads; i++) {
1017		err = clk_prepare_enable(hub->clk_heads[i]);
1018		if (err < 0)
1019			goto disable_heads;
1020	}
1021
1022	err = reset_control_deassert(hub->rst);
1023	if (err < 0)
1024		goto disable_heads;
1025
1026	return 0;
1027
1028disable_heads:
1029	while (i--)
1030		clk_disable_unprepare(hub->clk_heads[i]);
1031
1032	clk_disable_unprepare(hub->clk_hub);
1033disable_dsc:
1034	clk_disable_unprepare(hub->clk_dsc);
1035disable_disp:
1036	clk_disable_unprepare(hub->clk_disp);
1037put_rpm:
1038	pm_runtime_put_sync(dev);
1039	return err;
1040}
1041
1042static const struct host1x_client_ops tegra_display_hub_ops = {
1043	.init = tegra_display_hub_init,
1044	.exit = tegra_display_hub_exit,
1045	.suspend = tegra_display_hub_runtime_suspend,
1046	.resume = tegra_display_hub_runtime_resume,
1047};
1048
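/*
 * Probe looks up the hub clocks and resets from the device tree, takes a
 * "dc" clock from each child node for the heads, leaves all window groups
 * and the hub itself in reset, and registers the hub as a host1x client
 * before populating the child devices.
 */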
1049static int tegra_display_hub_probe(struct platform_device *pdev)
1050{
1051	u64 dma_mask = dma_get_mask(pdev->dev.parent);
1052	struct device_node *child = NULL;
1053	struct tegra_display_hub *hub;
1054	struct clk *clk;
1055	unsigned int i;
1056	int err;
1057
1058	err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1059	if (err < 0) {
1060		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1061		return err;
1062	}
1063
1064	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1065	if (!hub)
1066		return -ENOMEM;
1067
1068	hub->soc = of_device_get_match_data(&pdev->dev);
1069
1070	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1071	if (IS_ERR(hub->clk_disp)) {
1072		err = PTR_ERR(hub->clk_disp);
1073		return err;
1074	}
1075
1076	if (hub->soc->supports_dsc) {
1077		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1078		if (IS_ERR(hub->clk_dsc)) {
1079			err = PTR_ERR(hub->clk_dsc);
1080			return err;
1081		}
1082	}
1083
1084	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1085	if (IS_ERR(hub->clk_hub)) {
1086		err = PTR_ERR(hub->clk_hub);
1087		return err;
1088	}
1089
1090	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1091	if (IS_ERR(hub->rst)) {
1092		err = PTR_ERR(hub->rst);
1093		return err;
1094	}
1095
1096	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1097				  sizeof(*hub->wgrps), GFP_KERNEL);
1098	if (!hub->wgrps)
1099		return -ENOMEM;
1100
1101	for (i = 0; i < hub->soc->num_wgrps; i++) {
1102		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1103		char id[16];
1104
1105		snprintf(id, sizeof(id), "wgrp%u", i);
1106		mutex_init(&wgrp->lock);
1107		wgrp->usecount = 0;
1108		wgrp->index = i;
1109
1110		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1111		if (IS_ERR(wgrp->rst))
1112			return PTR_ERR(wgrp->rst);
1113
1114		err = reset_control_assert(wgrp->rst);
1115		if (err < 0)
1116			return err;
1117	}
1118
1119	hub->num_heads = of_get_child_count(pdev->dev.of_node);
1120
1121	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1122				      GFP_KERNEL);
1123	if (!hub->clk_heads)
1124		return -ENOMEM;
1125
1126	for (i = 0; i < hub->num_heads; i++) {
1127		child = of_get_next_child(pdev->dev.of_node, child);
1128		if (!child) {
1129			dev_err(&pdev->dev, "failed to find node for head %u\n",
1130				i);
1131			return -ENODEV;
1132		}
1133
1134		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1135		if (IS_ERR(clk)) {
1136			dev_err(&pdev->dev, "failed to get clock for head %u\n",
1137				i);
1138			of_node_put(child);
1139			return PTR_ERR(clk);
1140		}
1141
1142		hub->clk_heads[i] = clk;
1143	}
1144
1145	of_node_put(child);
1146
1147	/* XXX: enable clock across reset? */
1148	err = reset_control_assert(hub->rst);
1149	if (err < 0)
1150		return err;
1151
1152	platform_set_drvdata(pdev, hub);
1153	pm_runtime_enable(&pdev->dev);
1154
1155	INIT_LIST_HEAD(&hub->client.list);
1156	hub->client.ops = &tegra_display_hub_ops;
1157	hub->client.dev = &pdev->dev;
1158
1159	err = host1x_client_register(&hub->client);
1160	if (err < 0)
1161		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1162			err);
1163
1164	err = devm_of_platform_populate(&pdev->dev);
1165	if (err < 0)
1166		goto unregister;
1167
1168	return err;
1169
1170unregister:
1171	host1x_client_unregister(&hub->client);
1172	pm_runtime_disable(&pdev->dev);
1173	return err;
1174}
1175
1176static void tegra_display_hub_remove(struct platform_device *pdev)
1177{
1178	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1179	unsigned int i;
1180
1181	host1x_client_unregister(&hub->client);
1182
1183	for (i = 0; i < hub->soc->num_wgrps; i++) {
1184		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1185
1186		mutex_destroy(&wgrp->lock);
1187	}
1188
1189	pm_runtime_disable(&pdev->dev);
1190}
1191
1192static const struct tegra_display_hub_soc tegra186_display_hub = {
1193	.num_wgrps = 6,
1194	.supports_dsc = true,
1195};
1196
1197static const struct tegra_display_hub_soc tegra194_display_hub = {
1198	.num_wgrps = 6,
1199	.supports_dsc = false,
1200};
1201
1202static const struct of_device_id tegra_display_hub_of_match[] = {
1203	{
1204		.compatible = "nvidia,tegra194-display",
1205		.data = &tegra194_display_hub
1206	}, {
1207		.compatible = "nvidia,tegra186-display",
1208		.data = &tegra186_display_hub
1209	}, {
1210		/* sentinel */
1211	}
1212};
1213MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1214
1215struct platform_driver tegra_display_hub_driver = {
1216	.driver = {
1217		.name = "tegra-display-hub",
1218		.of_match_table = tegra_display_hub_of_match,
1219	},
1220	.probe = tegra_display_hub_probe,
1221	.remove = tegra_display_hub_remove,
1222};