Linux v6.13.7: drivers/gpu/drm/tegra/gr3d.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Avionic Design GmbH
 * Copyright (C) 2013 NVIDIA Corporation
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>

#include "drm.h"
#include "gem.h"
#include "gr3d.h"

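/*
 * Indices into the reset_control_bulk_data array in struct gr3d. Only the
 * first num_resets entries are requested, so SoCs with a single 3D unit
 * (Tegra20, Tegra114) use just "mc" and "3d", while Tegra30 also uses the
 * "mc2" and "3d2" resets of its second unit.
 */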
enum {
	RST_MC,
	RST_GR3D,
	RST_MC2,
	RST_GR3D2,
	RST_GR3D_MAX,
};

struct gr3d_soc {
	unsigned int version;
	unsigned int num_clocks;
	unsigned int num_resets;
};

struct gr3d {
	struct tegra_drm_client client;
	struct host1x_channel *channel;

	const struct gr3d_soc *soc;
	struct clk_bulk_data *clocks;
	unsigned int nclocks;
	struct reset_control_bulk_data resets[RST_GR3D_MAX];
	unsigned int nresets;
	struct dev_pm_domain_list *pd_list;

	DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};

static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
{
	return container_of(client, struct gr3d, client);
}

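/*
 * host1x client initialization: request a dedicated channel and a syncpoint
 * with an associated wait base, attach to the IOMMU domain and register the
 * engine with the Tegra DRM core. The error path unwinds in reverse order.
 */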
static int gr3d_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
	struct gr3d *gr3d = to_gr3d(drm);
	int err;

	gr3d->channel = host1x_channel_request(client);
	if (!gr3d->channel)
		return -ENOMEM;

	client->syncpts[0] = host1x_syncpt_request(client, flags);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		dev_err(client->dev, "failed to request syncpoint: %d\n", err);
		goto put;
	}

	err = host1x_client_iommu_attach(client);
	if (err < 0) {
		dev_err(client->dev, "failed to attach to domain: %d\n", err);
		goto free;
	}

	err = tegra_drm_register_client(dev->dev_private, drm);
	if (err < 0) {
		dev_err(client->dev, "failed to register client: %d\n", err);
		goto detach_iommu;
	}

	return 0;

detach_iommu:
	host1x_client_iommu_detach(client);
free:
	host1x_syncpt_put(client->syncpts[0]);
put:
	host1x_channel_put(gr3d->channel);
	return err;
}

static int gr3d_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct gr3d *gr3d = to_gr3d(drm);
	int err;

	err = tegra_drm_unregister_client(dev->dev_private, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_client_iommu_detach(client);
	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(gr3d->channel);

	gr3d->channel = NULL;

	return 0;
}

static const struct host1x_client_ops gr3d_client_ops = {
	.init = gr3d_init,
	.exit = gr3d_exit,
};

static int gr3d_open_channel(struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	struct gr3d *gr3d = to_gr3d(client);

	context->channel = host1x_channel_get(gr3d->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void gr3d_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

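/*
 * Command stream firewall callback: report which register offsets carry
 * buffer addresses so that writes to them in userspace-submitted command
 * streams are validated and relocated.
 */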
static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
{
	struct gr3d *gr3d = dev_get_drvdata(dev);

	switch (class) {
	case HOST1X_CLASS_HOST1X:
		if (offset == 0x2b)
			return 1;

		break;

	case HOST1X_CLASS_GR3D:
		if (offset >= GR3D_NUM_REGS)
			break;

		if (test_bit(offset, gr3d->addr_regs))
			return 1;

		break;
	}

	return 0;
}

static const struct tegra_drm_client_ops gr3d_ops = {
	.open_channel = gr3d_open_channel,
	.close_channel = gr3d_close_channel,
	.is_addr_reg = gr3d_is_addr_reg,
	.submit = tegra_drm_submit,
};

static const struct gr3d_soc tegra20_gr3d_soc = {
	.version = 0x20,
	.num_clocks = 1,
	.num_resets = 2,
};

static const struct gr3d_soc tegra30_gr3d_soc = {
	.version = 0x30,
	.num_clocks = 2,
	.num_resets = 4,
};

static const struct gr3d_soc tegra114_gr3d_soc = {
	.version = 0x35,
	.num_clocks = 1,
	.num_resets = 2,
};

static const struct of_device_id tegra_gr3d_match[] = {
	{ .compatible = "nvidia,tegra114-gr3d", .data = &tegra114_gr3d_soc },
	{ .compatible = "nvidia,tegra30-gr3d", .data = &tegra30_gr3d_soc },
	{ .compatible = "nvidia,tegra20-gr3d", .data = &tegra20_gr3d_soc },
	{ }
};
MODULE_DEVICE_TABLE(of, tegra_gr3d_match);

static const u32 gr3d_addr_regs[] = {
	GR3D_IDX_ATTRIBUTE( 0),
	GR3D_IDX_ATTRIBUTE( 1),
	GR3D_IDX_ATTRIBUTE( 2),
	GR3D_IDX_ATTRIBUTE( 3),
	GR3D_IDX_ATTRIBUTE( 4),
	GR3D_IDX_ATTRIBUTE( 5),
	GR3D_IDX_ATTRIBUTE( 6),
	GR3D_IDX_ATTRIBUTE( 7),
	GR3D_IDX_ATTRIBUTE( 8),
	GR3D_IDX_ATTRIBUTE( 9),
	GR3D_IDX_ATTRIBUTE(10),
	GR3D_IDX_ATTRIBUTE(11),
	GR3D_IDX_ATTRIBUTE(12),
	GR3D_IDX_ATTRIBUTE(13),
	GR3D_IDX_ATTRIBUTE(14),
	GR3D_IDX_ATTRIBUTE(15),
	GR3D_IDX_INDEX_BASE,
	GR3D_QR_ZTAG_ADDR,
	GR3D_QR_CTAG_ADDR,
	GR3D_QR_CZ_ADDR,
	GR3D_TEX_TEX_ADDR( 0),
	GR3D_TEX_TEX_ADDR( 1),
	GR3D_TEX_TEX_ADDR( 2),
	GR3D_TEX_TEX_ADDR( 3),
	GR3D_TEX_TEX_ADDR( 4),
	GR3D_TEX_TEX_ADDR( 5),
	GR3D_TEX_TEX_ADDR( 6),
	GR3D_TEX_TEX_ADDR( 7),
	GR3D_TEX_TEX_ADDR( 8),
	GR3D_TEX_TEX_ADDR( 9),
	GR3D_TEX_TEX_ADDR(10),
	GR3D_TEX_TEX_ADDR(11),
	GR3D_TEX_TEX_ADDR(12),
	GR3D_TEX_TEX_ADDR(13),
	GR3D_TEX_TEX_ADDR(14),
	GR3D_TEX_TEX_ADDR(15),
	GR3D_DW_MEMORY_OUTPUT_ADDRESS,
	GR3D_GLOBAL_SURFADDR( 0),
	GR3D_GLOBAL_SURFADDR( 1),
	GR3D_GLOBAL_SURFADDR( 2),
	GR3D_GLOBAL_SURFADDR( 3),
	GR3D_GLOBAL_SURFADDR( 4),
	GR3D_GLOBAL_SURFADDR( 5),
	GR3D_GLOBAL_SURFADDR( 6),
	GR3D_GLOBAL_SURFADDR( 7),
	GR3D_GLOBAL_SURFADDR( 8),
	GR3D_GLOBAL_SURFADDR( 9),
	GR3D_GLOBAL_SURFADDR(10),
	GR3D_GLOBAL_SURFADDR(11),
	GR3D_GLOBAL_SURFADDR(12),
	GR3D_GLOBAL_SURFADDR(13),
	GR3D_GLOBAL_SURFADDR(14),
	GR3D_GLOBAL_SURFADDR(15),
	GR3D_GLOBAL_SPILLSURFADDR,
	GR3D_GLOBAL_SURFOVERADDR( 0),
	GR3D_GLOBAL_SURFOVERADDR( 1),
	GR3D_GLOBAL_SURFOVERADDR( 2),
	GR3D_GLOBAL_SURFOVERADDR( 3),
	GR3D_GLOBAL_SURFOVERADDR( 4),
	GR3D_GLOBAL_SURFOVERADDR( 5),
	GR3D_GLOBAL_SURFOVERADDR( 6),
	GR3D_GLOBAL_SURFOVERADDR( 7),
	GR3D_GLOBAL_SURFOVERADDR( 8),
	GR3D_GLOBAL_SURFOVERADDR( 9),
	GR3D_GLOBAL_SURFOVERADDR(10),
	GR3D_GLOBAL_SURFOVERADDR(11),
	GR3D_GLOBAL_SURFOVERADDR(12),
	GR3D_GLOBAL_SURFOVERADDR(13),
	GR3D_GLOBAL_SURFOVERADDR(14),
	GR3D_GLOBAL_SURFOVERADDR(15),
	GR3D_GLOBAL_SAMP01SURFADDR( 0),
	GR3D_GLOBAL_SAMP01SURFADDR( 1),
	GR3D_GLOBAL_SAMP01SURFADDR( 2),
	GR3D_GLOBAL_SAMP01SURFADDR( 3),
	GR3D_GLOBAL_SAMP01SURFADDR( 4),
	GR3D_GLOBAL_SAMP01SURFADDR( 5),
	GR3D_GLOBAL_SAMP01SURFADDR( 6),
	GR3D_GLOBAL_SAMP01SURFADDR( 7),
	GR3D_GLOBAL_SAMP01SURFADDR( 8),
	GR3D_GLOBAL_SAMP01SURFADDR( 9),
	GR3D_GLOBAL_SAMP01SURFADDR(10),
	GR3D_GLOBAL_SAMP01SURFADDR(11),
	GR3D_GLOBAL_SAMP01SURFADDR(12),
	GR3D_GLOBAL_SAMP01SURFADDR(13),
	GR3D_GLOBAL_SAMP01SURFADDR(14),
	GR3D_GLOBAL_SAMP01SURFADDR(15),
	GR3D_GLOBAL_SAMP23SURFADDR( 0),
	GR3D_GLOBAL_SAMP23SURFADDR( 1),
	GR3D_GLOBAL_SAMP23SURFADDR( 2),
	GR3D_GLOBAL_SAMP23SURFADDR( 3),
	GR3D_GLOBAL_SAMP23SURFADDR( 4),
	GR3D_GLOBAL_SAMP23SURFADDR( 5),
	GR3D_GLOBAL_SAMP23SURFADDR( 6),
	GR3D_GLOBAL_SAMP23SURFADDR( 7),
	GR3D_GLOBAL_SAMP23SURFADDR( 8),
	GR3D_GLOBAL_SAMP23SURFADDR( 9),
	GR3D_GLOBAL_SAMP23SURFADDR(10),
	GR3D_GLOBAL_SAMP23SURFADDR(11),
	GR3D_GLOBAL_SAMP23SURFADDR(12),
	GR3D_GLOBAL_SAMP23SURFADDR(13),
	GR3D_GLOBAL_SAMP23SURFADDR(14),
	GR3D_GLOBAL_SAMP23SURFADDR(15),
};

static int gr3d_power_up_legacy_domain(struct device *dev, const char *name,
				       unsigned int id)
{
	struct gr3d *gr3d = dev_get_drvdata(dev);
	struct reset_control *reset;
	struct clk *clk;
	unsigned int i;
	int err;

	/*
	 * The Tegra20 device tree doesn't specify a name for the 3D clock,
	 * and there is only one clock on Tegra20. Tegra30+ device trees
	 * always specify names for the clocks.
	 */
	if (gr3d->nclocks == 1) {
		if (id == TEGRA_POWERGATE_3D1)
			return 0;

		clk = gr3d->clocks[0].clk;
	} else {
		for (i = 0; i < gr3d->nclocks; i++) {
			if (WARN_ON(!gr3d->clocks[i].id))
				continue;

			if (!strcmp(gr3d->clocks[i].id, name)) {
				clk = gr3d->clocks[i].clk;
				break;
			}
		}

		if (WARN_ON(i == gr3d->nclocks))
			return -EINVAL;
	}

	/*
	 * We use an array of resets, which includes the MC resets, and the
	 * MC reset shouldn't be asserted while the hardware is gated because
	 * MC flushing fails for gated hardware. Hence, for the legacy power
	 * domain we request the individual reset separately.
	 */
	reset = reset_control_get_exclusive_released(dev, name);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	err = reset_control_acquire(reset);
	if (err) {
		dev_err(dev, "failed to acquire %s reset: %d\n", name, err);
	} else {
		err = tegra_powergate_sequence_power_up(id, clk, reset);
		reset_control_release(reset);
	}

	reset_control_put(reset);
	if (err)
		return err;

	/*
	 * tegra_powergate_sequence_power_up() leaves the clocks enabled,
	 * while GENPD does not. Hence keep the clock-enable count balanced.
	 */
	clk_disable_unprepare(clk);

	return 0;
}

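/*
 * Power up the 3D unit(s) either through the legacy PMC powergate interface
 * (older device trees without a "power-domains" property) or by attaching
 * to the GENPD power domains described by newer device trees.
 */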
static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
{
	struct dev_pm_domain_attach_data pd_data = {
		.pd_names = (const char *[]) { "3d0", "3d1" },
		.num_pd_names = 2,
		.pd_flags = PD_FLAG_REQUIRED_OPP,
	};
	int err;

	err = of_count_phandle_with_args(dev->of_node, "power-domains",
					 "#power-domain-cells");
	if (err < 0) {
		if (err != -ENOENT)
			return err;

		/*
		 * Older device trees don't use GENPD. In this case we have to
		 * toggle the power domains manually.
		 */
		err = gr3d_power_up_legacy_domain(dev, "3d",
						  TEGRA_POWERGATE_3D);
		if (err)
			return err;

		err = gr3d_power_up_legacy_domain(dev, "3d2",
						  TEGRA_POWERGATE_3D1);
		if (err)
			return err;

		return 0;
	}

	/*
	 * The PM domain core automatically attaches a single power domain,
	 * otherwise it skips attaching completely. We have a single domain
	 * on Tegra20 and two domains on Tegra30+.
	 */
	if (dev->pm_domain)
		return 0;

	err = devm_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list);
	if (err < 0)
		return err;

	return 0;
}

static int gr3d_get_clocks(struct device *dev, struct gr3d *gr3d)
{
	int err;

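	/* devm_clk_bulk_get_all() returns the number of clocks on success */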
	err = devm_clk_bulk_get_all(dev, &gr3d->clocks);
	if (err < 0) {
		dev_err(dev, "failed to get clock: %d\n", err);
		return err;
	}
	gr3d->nclocks = err;

	if (gr3d->nclocks != gr3d->soc->num_clocks) {
		dev_err(dev, "invalid number of clocks: %u\n", gr3d->nclocks);
		return -ENOENT;
	}

	return 0;
}

static int gr3d_get_resets(struct device *dev, struct gr3d *gr3d)
{
	int err;

	gr3d->resets[RST_MC].id = "mc";
	gr3d->resets[RST_MC2].id = "mc2";
	gr3d->resets[RST_GR3D].id = "3d";
	gr3d->resets[RST_GR3D2].id = "3d2";
	gr3d->nresets = gr3d->soc->num_resets;

	err = devm_reset_control_bulk_get_optional_exclusive_released(
				dev, gr3d->nresets, gr3d->resets);
	if (err) {
		dev_err(dev, "failed to get reset: %d\n", err);
		return err;
	}

	if (WARN_ON(!gr3d->resets[RST_GR3D].rstc) ||
	    WARN_ON(!gr3d->resets[RST_GR3D2].rstc && gr3d->nresets == 4))
		return -ENOENT;

	return 0;
}

static int gr3d_probe(struct platform_device *pdev)
{
	struct host1x_syncpt **syncpts;
	struct gr3d *gr3d;
	unsigned int i;
	int err;

	gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
	if (!gr3d)
		return -ENOMEM;

	platform_set_drvdata(pdev, gr3d);

	gr3d->soc = of_device_get_match_data(&pdev->dev);

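	/* a single syncpoint pointer is enough: num_syncpts is set to 1 below */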
	syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	err = gr3d_get_clocks(&pdev->dev, gr3d);
	if (err)
		return err;

	err = gr3d_get_resets(&pdev->dev, gr3d);
	if (err)
		return err;

	err = gr3d_init_power(&pdev->dev, gr3d);
	if (err)
		return err;

	INIT_LIST_HEAD(&gr3d->client.base.list);
	gr3d->client.base.ops = &gr3d_client_ops;
	gr3d->client.base.dev = &pdev->dev;
	gr3d->client.base.class = HOST1X_CLASS_GR3D;
	gr3d->client.base.syncpts = syncpts;
	gr3d->client.base.num_syncpts = 1;

	INIT_LIST_HEAD(&gr3d->client.list);
	gr3d->client.version = gr3d->soc->version;
	gr3d->client.ops = &gr3d_ops;

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		return err;

	err = host1x_client_register(&gr3d->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
			err);
		return err;
	}

	/* initialize address register map */
	for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
		set_bit(gr3d_addr_regs[i], gr3d->addr_regs);

	return 0;
}

static void gr3d_remove(struct platform_device *pdev)
{
	struct gr3d *gr3d = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	host1x_client_unregister(&gr3d->client.base);
}

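/*
 * Runtime PM: the resets are acquired for as long as the engine is active
 * and released again on suspend, so that the power-domain code can control
 * the same reset lines while the unit is power-gated.
 */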
static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
{
	struct gr3d *gr3d = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop(gr3d->channel);

	err = reset_control_bulk_assert(gr3d->nresets, gr3d->resets);
	if (err) {
		dev_err(dev, "failed to assert reset: %d\n", err);
		return err;
	}

	usleep_range(10, 20);

	/*
	 * Older device trees don't specify MC resets, and power-gating can't
	 * be done safely in that case. Hence we keep the power ungated for
	 * older DTBs. For newer DTBs, GENPD performs the power-gating.
	 */

	clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
	reset_control_bulk_release(gr3d->nresets, gr3d->resets);

	return 0;
}

static int __maybe_unused gr3d_runtime_resume(struct device *dev)
{
	struct gr3d *gr3d = dev_get_drvdata(dev);
	int err;

	err = reset_control_bulk_acquire(gr3d->nresets, gr3d->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_bulk_prepare_enable(gr3d->nclocks, gr3d->clocks);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	err = reset_control_bulk_deassert(gr3d->nresets, gr3d->resets);
	if (err) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}

	pm_runtime_enable(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 500);

	return 0;

disable_clk:
	clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
release_reset:
	reset_control_bulk_release(gr3d->nresets, gr3d->resets);

	return err;
}

static const struct dev_pm_ops tegra_gr3d_pm = {
	SET_RUNTIME_PM_OPS(gr3d_runtime_suspend, gr3d_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

struct platform_driver tegra_gr3d_driver = {
	.driver = {
		.name = "tegra-gr3d",
		.of_match_table = tegra_gr3d_match,
		.pm = &tegra_gr3d_pm,
	},
	.probe = gr3d_probe,
	.remove = gr3d_remove,
};