// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <soc/tegra/common.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "context.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"
#include "hw/host1x08.h"

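/*
 * Register accessors. host1x exposes several MMIO apertures: on
 * virtualization-capable SoCs a hypervisor aperture (and, where has_common
 * is set, a "common" aperture) in addition to the per-VM one, a sync
 * aperture located at info->sync_offset within the main aperture, and one
 * register region per channel.
 */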
void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->common_regs + r);
}

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}

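/*
 * Per-SoC capability tables. Each host1x revision describes its channel,
 * syncpoint, mlock and wait-base counts, its DMA addressing limits and,
 * from Tegra186 onwards, the stream ID table used to program memory
 * isolation for the engines behind host1x.
 */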
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra186_sid_table[] = {
	{ /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 },
	{ /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 },
	{ /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 },
	{ /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 },
	{ /* ISP */ .base = 0x1ae8, .offset = 0x50, .limit = 0x50 },
	{ /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 },
	{ /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 },
	{ /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 },
	{ /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 },
	{ /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 },
	{ /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 },
	{ /* VI 0 */ .base = 0x1b80, .offset = 0x10000, .limit = 0x10000 },
	{ /* VI 1 */ .base = 0x1b88, .offset = 0x20000, .limit = 0x20000 },
	{ /* VI 2 */ .base = 0x1b90, .offset = 0x30000, .limit = 0x30000 },
	{ /* VI 3 */ .base = 0x1b98, .offset = 0x40000, .limit = 0x40000 },
	{ /* VI 4 */ .base = 0x1ba0, .offset = 0x50000, .limit = 0x50000 },
	{ /* VI 5 */ .base = 0x1ba8, .offset = 0x60000, .limit = 0x60000 },
	{ /* VI 6 */ .base = 0x1bb0, .offset = 0x70000, .limit = 0x70000 },
	{ /* VI 7 */ .base = 0x1bb8, .offset = 0x80000, .limit = 0x80000 },
	{ /* VI 8 */ .base = 0x1bc0, .offset = 0x90000, .limit = 0x90000 },
	{ /* VI 9 */ .base = 0x1bc8, .offset = 0xa0000, .limit = 0xa0000 },
	{ /* VI 10 */ .base = 0x1bd0, .offset = 0xb0000, .limit = 0xb0000 },
	{ /* VI 11 */ .base = 0x1bd8, .offset = 0xc0000, .limit = 0xc0000 },
};

static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
	.reserve_vblank_syncpts = false,
	.skip_reset_assert = true,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
	{ /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 },
	{ /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 },
	{ /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 },
	{ /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 },
	{ /* ISP */ .base = 0x1ae8, .offset = 0x800, .limit = 0x800 },
	{ /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 },
	{ /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 },
	{ /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 },
	{ /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 },
	{ /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 },
	{ /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 },
	{ /* VI */ .base = 0x1b80, .offset = 0x800, .limit = 0x800 },
	{ /* VI_THI */ .base = 0x1b88, .offset = 0x30, .limit = 0x34 },
	{ /* ISP_THI */ .base = 0x1b90, .offset = 0x30, .limit = 0x34 },
	{ /* PVA0_CLUSTER */ .base = 0x1b98, .offset = 0x0, .limit = 0x0 },
	{ /* PVA0_CLUSTER */ .base = 0x1ba0, .offset = 0x0, .limit = 0x0 },
	{ /* NVDLA0 */ .base = 0x1ba8, .offset = 0x30, .limit = 0x34 },
	{ /* NVDLA1 */ .base = 0x1bb0, .offset = 0x30, .limit = 0x34 },
	{ /* NVENC1 */ .base = 0x1bb8, .offset = 0x30, .limit = 0x34 },
	{ /* NVDEC1 */ .base = 0x1bc0, .offset = 0x30, .limit = 0x34 },
};

static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
	.reserve_vblank_syncpts = false,
};

/*
 * Tegra234 has two stream ID protection tables, one for setting stream IDs
 * through the channel path via SETSTREAMID, and one for setting them via
 * MMIO. We program each engine's data stream ID in the channel path table
 * and firmware stream ID in the MMIO path table.
 */
static const struct host1x_sid_entry tegra234_sid_table[] = {
	{ /* SE1 MMIO */ .base = 0x1650, .offset = 0x90, .limit = 0x90 },
	{ /* SE1 ch */ .base = 0x1730, .offset = 0x90, .limit = 0x90 },
	{ /* SE2 MMIO */ .base = 0x1658, .offset = 0x90, .limit = 0x90 },
	{ /* SE2 ch */ .base = 0x1738, .offset = 0x90, .limit = 0x90 },
	{ /* SE4 MMIO */ .base = 0x1660, .offset = 0x90, .limit = 0x90 },
	{ /* SE4 ch */ .base = 0x1740, .offset = 0x90, .limit = 0x90 },
	{ /* ISP MMIO */ .base = 0x1680, .offset = 0x800, .limit = 0x800 },
	{ /* VIC MMIO */ .base = 0x1688, .offset = 0x34, .limit = 0x34 },
	{ /* VIC ch */ .base = 0x17b8, .offset = 0x30, .limit = 0x30 },
	{ /* NVENC MMIO */ .base = 0x1690, .offset = 0x34, .limit = 0x34 },
	{ /* NVENC ch */ .base = 0x17c0, .offset = 0x30, .limit = 0x30 },
	{ /* NVDEC MMIO */ .base = 0x1698, .offset = 0x34, .limit = 0x34 },
	{ /* NVDEC ch */ .base = 0x17c8, .offset = 0x30, .limit = 0x30 },
	{ /* NVJPG MMIO */ .base = 0x16a0, .offset = 0x34, .limit = 0x34 },
	{ /* NVJPG ch */ .base = 0x17d0, .offset = 0x30, .limit = 0x30 },
	{ /* TSEC MMIO */ .base = 0x16a8, .offset = 0x30, .limit = 0x34 },
	{ /* NVJPG1 MMIO */ .base = 0x16b0, .offset = 0x34, .limit = 0x34 },
	{ /* NVJPG1 ch */ .base = 0x17a8, .offset = 0x30, .limit = 0x30 },
	{ /* VI MMIO */ .base = 0x16b8, .offset = 0x800, .limit = 0x800 },
	{ /* VI_THI MMIO */ .base = 0x16c0, .offset = 0x30, .limit = 0x34 },
	{ /* ISP_THI MMIO */ .base = 0x16c8, .offset = 0x30, .limit = 0x34 },
	{ /* NVDLA MMIO */ .base = 0x16d8, .offset = 0x30, .limit = 0x34 },
	{ /* NVDLA ch */ .base = 0x17e0, .offset = 0x30, .limit = 0x34 },
	{ /* NVDLA1 MMIO */ .base = 0x16e0, .offset = 0x30, .limit = 0x34 },
	{ /* NVDLA1 ch */ .base = 0x17e8, .offset = 0x30, .limit = 0x34 },
	{ /* OFA MMIO */ .base = 0x16e8, .offset = 0x34, .limit = 0x34 },
	{ /* OFA ch */ .base = 0x1768, .offset = 0x30, .limit = 0x30 },
	{ /* VI2 MMIO */ .base = 0x16f0, .offset = 0x800, .limit = 0x800 },
	{ /* VI2_THI MMIO */ .base = 0x16f8, .offset = 0x30, .limit = 0x34 },
};

static const struct host1x_info host1x08_info = {
	.nb_channels = 63,
	.nb_pts = 1024,
	.nb_mlocks = 24,
	.nb_bases = 0,
	.init = host1x08_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.has_common = true,
	.num_sid_entries = ARRAY_SIZE(tegra234_sid_table),
	.sid_table = tegra234_sid_table,
	.streamid_vm_table = { 0x1004, 128 },
	.classid_vm_table = { 0x1404, 25 },
	.mmio_vm_table = { 0x1504, 25 },
	.reserve_vblank_syncpts = false,
};

static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra234-host1x", .data = &host1x08_info, },
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);

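/*
 * (Re)program the stream ID protection tables and, on SoCs that have them,
 * the per-VM access tables. Called from host1x_runtime_resume() after the
 * controller comes out of reset, and on the runtime-suspend error path.
 */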
static void host1x_setup_virtualization_tables(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	if (!info->has_hypervisor)
		return;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}

	for (i = 0; i < info->streamid_vm_table.count; i++) {
		/* Allow access to all stream IDs to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->classid_vm_table.count; i++) {
		/* Allow access to all classes to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->mmio_vm_table.count; i++) {
		/* Use VM1 (that's us) as originator VMID for engine MMIO accesses. */
		host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
	}
}

static bool host1x_wants_iommu(struct host1x *host1x)
{
	/* Our IOMMU usage policy doesn't currently play well with GART */
	if (of_machine_is_compatible("nvidia,tegra20"))
		return false;

	/*
	 * If we support addressing a maximum of 32 bits of physical memory
	 * and if the host1x firewall is enabled, there's no need to enable
	 * IOMMU support. This can happen for example on Tegra20, Tegra30
	 * and Tegra114.
	 *
	 * Tegra124 and later can address up to 34 bits of physical memory and
	 * many platforms come equipped with more than 2 GiB of system memory,
	 * which requires crossing the 4 GiB boundary. But there's a catch: on
	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
	 * only address up to 32 bits of memory in GATHER opcodes, which means
	 * that command buffers need to either be in the first 2 GiB of system
	 * memory (which could quickly lead to memory exhaustion), or command
	 * buffers need to be treated differently from other buffers (which is
	 * not possible with the current ABI).
	 *
	 * A third option is to use the IOMMU in these cases to make sure all
	 * buffers will be mapped into a 32-bit IOVA space that host1x can
	 * address. This allows all of the system memory to be used and works
	 * within the limitations of the host1x on these SoCs.
	 *
	 * In summary, default to enable IOMMU on Tegra124 and later. For any
	 * of the earlier SoCs, only use the IOMMU for additional safety when
	 * the host1x firewall is disabled.
	 */
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}

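/*
 * Attach host1x to an IOMMU domain when the policy above asks for one. On
 * 32-bit ARM, any DMA API mapping set up by the architecture code is torn
 * down first; an explicitly managed paging domain and IOVA allocator are
 * then created so that host1x controls its own IOVA space.
 */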
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (host->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
			to_dma_iommu_mapping(host->dev);
		arm_iommu_detach_device(host->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(host->dev);
	}
#endif

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_paging_domain_alloc(host->dev);
		if (IS_ERR(host->domain)) {
			err = PTR_ERR(host->domain);
			host->domain = NULL;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}

static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}

static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}

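/*
 * Note that both reset controls are optional and requested in the released
 * state: runtime resume acquires them around the deassert and runtime
 * suspend releases them again, presumably so that the lines can be
 * controlled by another consumer while host1x does not hold them.
 */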
static int host1x_get_resets(struct host1x *host)
{
	int err;

	host->resets[0].id = "mc";
	host->resets[1].id = "host1x";
	host->nresets = ARRAY_SIZE(host->resets);

	err = devm_reset_control_bulk_get_optional_exclusive_released(
		host->dev, host->nresets, host->resets);
	if (err) {
		dev_err(host->dev, "failed to get reset: %d\n", err);
		return err;
	}

	return 0;
}

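/*
 * Probe maps the register apertures (split into "vm", "hypervisor" and
 * optionally "common" regions on virtualization-capable SoCs), collects the
 * syncpoint interrupts and brings up the IOMMU, channel, context and
 * syncpoint infrastructure before registering the host1x bus and populating
 * child devices from the device tree.
 */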
static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	int err, i;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		host->regs = devm_platform_ioremap_resource_byname(pdev, "vm");
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);

		host->hv_regs = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);

		if (host->info->has_common) {
			host->common_regs = devm_platform_ioremap_resource_byname(pdev, "common");
			if (IS_ERR(host->common_regs))
				return PTR_ERR(host->common_regs);
		}
	} else {
		host->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);
	}

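	/*
	 * Newer device trees provide several syncpoint interrupts, named
	 * "syncpt0", "syncpt1", ...; collect as many as are present and fall
	 * back to the single unnamed interrupt of older device trees.
	 */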
	for (i = 0; i < ARRAY_SIZE(host->syncpt_irqs); i++) {
		char irq_name[] = "syncptX";

		sprintf(irq_name, "syncpt%d", i);

		err = platform_get_irq_byname_optional(pdev, irq_name);
		if (err == -ENXIO)
			break;
		if (err < 0)
			return err;

		host->syncpt_irqs[i] = err;
	}

	host->num_syncpt_irqs = i;

	/* Device tree without irq names */
	if (i == 0) {
		host->syncpt_irqs[0] = platform_get_irq(pdev, 0);
		if (host->syncpt_irqs[0] < 0)
			return host->syncpt_irqs[0];

		host->num_syncpt_irqs = 1;
	}

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	err = host1x_get_resets(host);
	if (err)
		return err;

	host1x_bo_cache_init(&host->cache);

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		goto destroy_cache;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = host1x_memory_context_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize context list\n");
		goto free_channels;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto free_contexts;
	}

	mutex_init(&host->intr_mutex);

	pm_runtime_enable(&pdev->dev);

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		goto pm_disable;

	/* the driver's code isn't ready yet for the dynamic RPM */
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err)
		goto pm_disable;

	err = host1x_intr_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto pm_put;
	}

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;

unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);
	host1x_intr_deinit(host);
pm_put:
	pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);
	host1x_syncpt_deinit(host);
free_contexts:
	host1x_memory_context_list_free(&host->context_list);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);
destroy_cache:
	host1x_bo_cache_destroy(&host->cache);

	return err;
}

static void host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);

	pm_runtime_force_suspend(&pdev->dev);

	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	host1x_memory_context_list_free(&host->context_list);
	host1x_channel_list_free(&host->channel_list);
	host1x_iommu_exit(host);
	host1x_bo_cache_destroy(&host->cache);
}

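/*
 * Runtime PM: suspend quiesces the channels, interrupts and syncpoints,
 * asserts the resets (unless the SoC sets skip_reset_assert) and gates the
 * clock; resume reverses this and reprograms the virtualization tables and
 * the saved syncpoint state.
 */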
static int __maybe_unused host1x_runtime_suspend(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop_all(host);
	host1x_intr_stop(host);
	host1x_syncpt_save(host);

	if (!host->info->skip_reset_assert) {
		err = reset_control_bulk_assert(host->nresets, host->resets);
		if (err) {
			dev_err(dev, "failed to assert reset: %d\n", err);
			goto resume_host1x;
		}

		usleep_range(1000, 2000);
	}

	clk_disable_unprepare(host->clk);
	reset_control_bulk_release(host->nresets, host->resets);

	return 0;

resume_host1x:
	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return err;
}

static int __maybe_unused host1x_runtime_resume(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	err = reset_control_bulk_acquire(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(host->clk);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	err = reset_control_bulk_deassert(host->nresets, host->resets);
	if (err < 0) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}

	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return 0;

disable_clk:
	clk_disable_unprepare(host->clk);
release_reset:
	reset_control_bulk_release(host->nresets, host->resets);

	return err;
}

static const struct dev_pm_ops host1x_pm_ops = {
	SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
		.pm = &host1x_pm_ops,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};

static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");