// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2022, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/mc.h>

#include "drm.h"
#include "falcon.h"
#include "riscv.h"
#include "vic.h"

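/*
 * Register offsets used below: NVDEC_FALCON_DEBUGINFO serves as a boot
 * handshake register (the driver writes a sentinel which the booted firmware
 * is expected to clear), and NVDEC_TFBIF_TRANSCFG selects the stream IDs
 * used for the engine's DMA traffic.
 */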
#define NVDEC_FALCON_DEBUGINFO		0x1094
#define NVDEC_TFBIF_TRANSCFG		0x2c44

struct nvdec_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
	bool has_riscv;
	bool has_extra_clocks;
};

struct nvdec {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk_bulk_data clks[3];
	unsigned int num_clks;
	struct reset_control *reset;

	/* Platform configuration */
	const struct nvdec_config *config;

	/* RISC-V specific data */
	struct tegra_drm_riscv riscv;
	phys_addr_t carveout_base;
};

static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
{
	return container_of(client, struct nvdec, client);
}

static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
				unsigned int offset)
{
	writel(value, nvdec->regs + offset);
}

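/*
 * Program the stream IDs used by the engine (when an IOMMU is attached) and
 * start the Falcon microcontroller, waiting for it to go idle. The THI
 * register layout is shared with VIC, which is why the VIC_THI_STREAMID*
 * offsets from vic.h are reused here.
 */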
static int nvdec_boot_falcon(struct nvdec *nvdec)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(nvdec->dev);
#endif
	int err;

#ifdef CONFIG_IOMMU_API
	if (nvdec->config->supports_sid && spec) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		nvdec_writel(nvdec, value, NVDEC_TFBIF_TRANSCFG);

		if (spec->num_ids > 0) {
			value = spec->ids[0] & 0xffff;

			nvdec_writel(nvdec, value, VIC_THI_STREAMID0);
			nvdec_writel(nvdec, value, VIC_THI_STREAMID1);
		}
	}
#endif

	err = falcon_boot(&nvdec->falcon);
	if (err < 0)
		return err;

	err = falcon_wait_idle(&nvdec->falcon);
	if (err < 0) {
		dev_err(nvdec->dev, "falcon boot timed out\n");
		return err;
	}

	return 0;
}

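/*
 * Poll NVDEC_FALCON_DEBUGINFO until the firmware clears the sentinel that
 * was written before boot, i.e. until the given boot phase has completed.
 */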
static int nvdec_wait_debuginfo(struct nvdec *nvdec, const char *phase)
{
	int err;
	u32 val;

	err = readl_poll_timeout(nvdec->regs + NVDEC_FALCON_DEBUGINFO, val, val == 0x0, 10, 100000);
	if (err) {
		dev_err(nvdec->dev, "failed to boot %s, debuginfo=0x%x\n", phase, val);
		return err;
	}

	return 0;
}

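/*
 * Boot the RISC-V based NVDEC (Tegra234) in two stages: first the bootloader
 * descriptor from the memory controller carveout is executed, then, after a
 * reset, the firmware (OS) descriptor. A sentinel is written to DEBUGINFO
 * before each stage and nvdec_wait_debuginfo() waits for it to be cleared.
 */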
static int nvdec_boot_riscv(struct nvdec *nvdec)
{
	int err;

	err = reset_control_acquire(nvdec->reset);
	if (err)
		return err;

	nvdec_writel(nvdec, 0xabcd1234, NVDEC_FALCON_DEBUGINFO);

	err = tegra_drm_riscv_boot_bootrom(&nvdec->riscv, nvdec->carveout_base, 1,
					   &nvdec->riscv.bl_desc);
	if (err) {
		dev_err(nvdec->dev, "failed to execute bootloader\n");
		goto release_reset;
	}

	err = nvdec_wait_debuginfo(nvdec, "bootloader");
	if (err)
		goto release_reset;

	err = reset_control_reset(nvdec->reset);
	if (err)
		goto release_reset;

	nvdec_writel(nvdec, 0xabcd1234, NVDEC_FALCON_DEBUGINFO);

	err = tegra_drm_riscv_boot_bootrom(&nvdec->riscv, nvdec->carveout_base, 1,
					   &nvdec->riscv.os_desc);
	if (err) {
		dev_err(nvdec->dev, "failed to execute firmware\n");
		goto release_reset;
	}

	err = nvdec_wait_debuginfo(nvdec, "firmware");
	if (err)
		goto release_reset;

release_reset:
	reset_control_release(nvdec->reset);

	return err;
}

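/*
 * host1x client initialization: attach to the IOMMU domain, request a
 * channel and a syncpoint, set up runtime PM with autosuspend and register
 * with the Tegra DRM core.
 */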
static int nvdec_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(nvdec->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	nvdec->channel = host1x_channel_request(client);
	if (!nvdec->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	pm_runtime_enable(client->dev);
	pm_runtime_use_autosuspend(client->dev);
	pm_runtime_set_autosuspend_delay(client->dev, 500);

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto disable_rpm;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

disable_rpm:
	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(nvdec->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

static int nvdec_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(nvdec->channel);
	host1x_client_iommu_detach(client);

	nvdec->channel = NULL;

	if (client->group) {
		dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys,
				 nvdec->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, nvdec->falcon.firmware.size,
			       nvdec->falcon.firmware.virt,
			       nvdec->falcon.firmware.iova);
	} else {
		dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size,
				  nvdec->falcon.firmware.virt,
				  nvdec->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops nvdec_client_ops = {
	.init = nvdec_init,
	.exit = nvdec_exit,
};

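/*
 * Read the Falcon firmware image and place it where the engine can fetch it:
 * either in the shared IOMMU domain (tegra_drm_alloc(), plus a
 * dma_map_single() so the CPU caches get flushed) or, when no shared domain
 * is used, in a coherent DMA allocation.
 */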
static int nvdec_load_falcon_firmware(struct nvdec *nvdec)
{
	struct host1x_client *client = &nvdec->client.base;
	struct tegra_drm *tegra = nvdec->client.drm;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	if (nvdec->falcon.firmware.virt)
		return 0;

	err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware);
	if (err < 0)
		return err;

	size = nvdec->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);

		err = dma_mapping_error(nvdec->dev, iova);
		if (err < 0)
			return err;
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
	}

	nvdec->falcon.firmware.virt = virt;
	nvdec->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&nvdec->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(nvdec->dev, phys);
		if (err < 0)
			goto cleanup;

		nvdec->falcon.firmware.phys = phys;
	}

	return 0;

cleanup:
	if (!client->group)
		dma_free_coherent(nvdec->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	return err;
}

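/*
 * Runtime PM: resume enables the clocks and (re)boots the engine, either
 * via the RISC-V boot flow or by loading and booting the Falcon firmware.
 */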
static __maybe_unused int nvdec_runtime_resume(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);
	int err;

	err = clk_bulk_prepare_enable(nvdec->num_clks, nvdec->clks);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	if (nvdec->config->has_riscv) {
		err = nvdec_boot_riscv(nvdec);
		if (err < 0)
			goto disable;
	} else {
		err = nvdec_load_falcon_firmware(nvdec);
		if (err < 0)
			goto disable;

		err = nvdec_boot_falcon(nvdec);
		if (err < 0)
			goto disable;
	}

	return 0;

disable:
	clk_bulk_disable_unprepare(nvdec->num_clks, nvdec->clks);
	return err;
}

static __maybe_unused int nvdec_runtime_suspend(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);

	host1x_channel_stop(nvdec->channel);

	clk_bulk_disable_unprepare(nvdec->num_clks, nvdec->clks);

	return 0;
}

static int nvdec_open_channel(struct tegra_drm_client *client,
			      struct tegra_drm_context *context)
{
	struct nvdec *nvdec = to_nvdec(client);

	context->channel = host1x_channel_get(nvdec->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void nvdec_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

static int nvdec_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	*supported = true;

	return 0;
}

static const struct tegra_drm_client_ops nvdec_ops = {
	.open_channel = nvdec_open_channel,
	.close_channel = nvdec_close_channel,
	.submit = tegra_drm_submit,
	.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
	.can_use_memory_ctx = nvdec_can_use_memory_ctx,
};

#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"

static const struct nvdec_config nvdec_t210_config = {
	.firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin"

static const struct nvdec_config nvdec_t186_config = {
	.firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin"

static const struct nvdec_config nvdec_t194_config = {
	.firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

static const struct nvdec_config nvdec_t234_config = {
	.version = 0x23,
	.supports_sid = true,
	.has_riscv = true,
	.has_extra_clocks = true,
};

static const struct of_device_id tegra_nvdec_of_match[] = {
	{ .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config },
	{ .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config },
	{ .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config },
	{ .compatible = "nvidia,tegra234-nvdec", .data = &nvdec_t234_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match);

static int nvdec_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct nvdec *nvdec;
	u32 host_class;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL);
	if (!nvdec)
		return -ENOMEM;

	nvdec->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(nvdec->regs))
		return PTR_ERR(nvdec->regs);

	nvdec->clks[0].id = "nvdec";
	nvdec->num_clks = 1;

	if (nvdec->config->has_extra_clocks) {
		nvdec->num_clks = 3;
		nvdec->clks[1].id = "fuse";
		nvdec->clks[2].id = "tsec_pka";
	}

	err = devm_clk_bulk_get(dev, nvdec->num_clks, nvdec->clks);
	if (err) {
		dev_err(&pdev->dev, "failed to get clock(s)\n");
		return err;
	}

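	/* request the highest rate the main NVDEC clock supports */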
	err = clk_set_rate(nvdec->clks[0].clk, ULONG_MAX);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock rate\n");
		return err;
	}

	err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class);
	if (err < 0)
		host_class = HOST1X_CLASS_NVDEC;

	if (nvdec->config->has_riscv) {
		struct tegra_mc *mc;

		mc = devm_tegra_memory_controller_get(dev);
		if (IS_ERR(mc)) {
			dev_err_probe(dev, PTR_ERR(mc),
				      "failed to get memory controller handle\n");
			return PTR_ERR(mc);
		}

		err = tegra_mc_get_carveout_info(mc, 1, &nvdec->carveout_base, NULL);
		if (err) {
			dev_err(dev, "failed to get carveout info: %d\n", err);
			return err;
		}

		nvdec->reset = devm_reset_control_get_exclusive_released(dev, "nvdec");
		if (IS_ERR(nvdec->reset)) {
			dev_err_probe(dev, PTR_ERR(nvdec->reset), "failed to get reset\n");
			return PTR_ERR(nvdec->reset);
		}

		nvdec->riscv.dev = dev;
		nvdec->riscv.regs = nvdec->regs;

		err = tegra_drm_riscv_read_descriptors(&nvdec->riscv);
		if (err < 0)
			return err;
	} else {
		nvdec->falcon.dev = dev;
		nvdec->falcon.regs = nvdec->regs;

		err = falcon_init(&nvdec->falcon);
		if (err < 0)
			return err;
	}

	platform_set_drvdata(pdev, nvdec);

	INIT_LIST_HEAD(&nvdec->client.base.list);
	nvdec->client.base.ops = &nvdec_client_ops;
	nvdec->client.base.dev = dev;
	nvdec->client.base.class = host_class;
	nvdec->client.base.syncpts = syncpts;
	nvdec->client.base.num_syncpts = 1;
	nvdec->dev = dev;

	INIT_LIST_HEAD(&nvdec->client.list);
	nvdec->client.version = nvdec->config->version;
	nvdec->client.ops = &nvdec_ops;

	err = host1x_client_register(&nvdec->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	return 0;

exit_falcon:
	falcon_exit(&nvdec->falcon);

	return err;
}

static int nvdec_remove(struct platform_device *pdev)
{
	struct nvdec *nvdec = platform_get_drvdata(pdev);
	int err;

	err = host1x_client_unregister(&nvdec->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	falcon_exit(&nvdec->falcon);

	return 0;
}

static const struct dev_pm_ops nvdec_pm_ops = {
	SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

struct platform_driver tegra_nvdec_driver = {
	.driver = {
		.name = "tegra-nvdec",
		.of_match_table = tegra_nvdec_of_match,
		.pm = &nvdec_pm_ops
	},
	.probe = nvdec_probe,
	.remove = nvdec_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE);
#endif