// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_prime.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"

/*
 * DRM operations:
 */

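/*
 * Return the first enabled "vivante,gc" node in the device tree, or NULL if
 * none is available. The caller owns a reference on the returned node and
 * must drop it with of_node_put().
 */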
static struct device_node *etnaviv_of_first_available_node(void)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (of_device_is_available(np))
			return np;
	}

	return NULL;
}

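/*
 * Bring up every GPU core that was bound to the master device. A core that
 * fails to initialize is dropped from the pipe array so it is never exposed
 * to userspace.
 */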
static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret)
				priv->gpu[i] = NULL;
		}
	}
}

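/*
 * Per-open setup: allocate the file private data, register it in the
 * active_contexts xarray, create a per-file MMU context and initialize one
 * scheduler entity for each GPU core that is present.
 */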
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx;
	int ret, i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ret = xa_alloc_cyclic(&priv->active_contexts, &ctx->id, ctx,
			      xa_limit_32b, &priv->next_context_id, GFP_KERNEL);
	if (ret < 0)
		goto out_free;

	ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
					      priv->cmdbuf_suballoc);
	if (!ctx->mmu) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];
		struct drm_gpu_scheduler *sched;

		if (gpu) {
			sched = &gpu->sched;
			drm_sched_entity_init(&ctx->sched_entity[i],
					      DRM_SCHED_PRIORITY_NORMAL, &sched,
					      1, NULL);
		}
	}

	file->driver_priv = ctx;

	return 0;

out_free:
	kfree(ctx);
	return ret;
}

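/*
 * Per-close teardown, mirroring etnaviv_open(): destroy the scheduler
 * entities, drop the MMU context reference, remove the context from the
 * active_contexts xarray and free it.
 */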
static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu)
			drm_sched_entity_destroy(&ctx->sched_entity[i]);
	}

	etnaviv_iommu_context_put(ctx->mmu);

	xa_erase(&priv->active_contexts, ctx->id);

	kfree(ctx);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}

static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	read_lock(&dev->vma_offset_manager->vm_lock);
	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return 0;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct etnaviv_iommu_context *mmu_context;

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	/*
	 * Lock the GPU to prevent an MMU context switch right now, and take a
	 * reference on the current context so it cannot disappear from under
	 * our feet.
	 */
	mutex_lock(&gpu->lock);
	mmu_context = gpu->mmu_context;
	if (mmu_context)
		etnaviv_iommu_context_get(mmu_context);
	mutex_unlock(&gpu->lock);

	if (!mmu_context)
		return 0;

	mutex_lock(&mmu_context->lock);
	drm_mm_print(&mmu_context->mm, &p);
	mutex_unlock(&mmu_context->lock);

	etnaviv_iommu_context_put(mmu_context);

	return 0;
}

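/*
 * Dump the kernel ring buffer: virtual and physical address, remaining free
 * space, then the buffer contents as four 32-bit words per line. Called from
 * etnaviv_ring_show() with gpu->lock held.
 */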
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = &gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
		   buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
		   size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}

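/*
 * debugfs dispatch helpers: show_unlocked() runs a per-device show callback,
 * show_each_gpu() runs a per-GPU callback for every populated pipe and stops
 * on the first error.
 */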
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}

static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
			node->info_ent->data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		gpu = priv->gpu[i];
		if (!gpu)
			continue;

		ret = show(gpu, m);
		if (ret < 0)
			break;
	}

	return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
	{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
	{"gem", show_unlocked, 0, etnaviv_gem_show},
	{"mm", show_unlocked, 0, etnaviv_mm_show},
	{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
	{"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static void etnaviv_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(etnaviv_debugfs_list,
				 ARRAY_SIZE(etnaviv_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif

/*
 * DRM ioctls:
 */

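/*
 * Every ioctl that addresses a GPU validates args->pipe against
 * ETNA_MAX_PIPES and returns -ENXIO if no GPU is bound to that pipe.
 */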
static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}

static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_timespec *timeout = &args->timeout;
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}

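/*
 * Wrap a range of user memory in a GEM object. The flags must request read
 * and/or write access, pointer and size must be page aligned and
 * representable, and the range must be accessible userspace memory.
 */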
static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (!access_ok((void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}

static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct drm_etnaviv_timespec *timeout = &args->timeout;
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_domain *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_dom(gpu, args);
}

static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_signal *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_sig(gpu, args);
}

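/*
 * The ioctl table. All ioctls are available on render nodes
 * (DRM_RENDER_ALLOW).
 */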
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW, gem_new, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO, gem_info, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(fops);

static const struct drm_driver etnaviv_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_RENDER,
	.open = etnaviv_open,
	.postclose = etnaviv_postclose,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = etnaviv_debugfs_init,
#endif
	.ioctls = etnaviv_ioctls,
	.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
	.fops = &fops,
	.name = "etnaviv",
	.desc = "etnaviv DRM",
	.date = "20151214",
	.major = 1,
	.minor = 4,
};

/*
 * Platform driver:
 */
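/*
 * Master bind callback of the component aggregate: allocate the DRM device
 * and driver-private data, set up DMA and GFP constraints, create the
 * command buffer suballocator, bind all GPU core components, initialize the
 * GPUs and finally register the DRM device.
 */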
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_put;
	}
	drm->dev_private = priv;

	dma_set_max_seg_size(dev, SZ_2G);

	xa_init_flags(&priv->active_contexts, XA_FLAGS_ALLOC);

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;
	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

	/*
	 * If the GPU is part of a system with DMA addressing limitations,
	 * request pages for our SHM backend buffers from the DMA32 zone to
	 * hopefully avoid performance-killing SWIOTLB bounce buffering.
	 */
	if (dma_addressing_limited(dev)) {
		priv->shm_gfp_mask |= GFP_DMA32;
		priv->shm_gfp_mask &= ~__GFP_HIGHMEM;
	}

	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
	if (IS_ERR(priv->cmdbuf_suballoc)) {
		dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(priv->cmdbuf_suballoc);
		goto out_free_priv;
	}

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_destroy_suballoc;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_unbind;

	return 0;

out_unbind:
	component_unbind_all(dev, drm);
out_destroy_suballoc:
	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
out_free_priv:
	mutex_destroy(&priv->gem_lock);
	kfree(priv);
out_put:
	drm_dev_put(drm);

	return ret;
}

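/*
 * Master unbind callback, undoing etnaviv_bind() in reverse order.
 */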
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);

	xa_destroy(&priv->active_contexts);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_put(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};

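/*
 * Probe of the virtual "etnaviv" platform device: build the component match
 * either from all available "vivante,gc" DT nodes or from the device names
 * passed as platform data, set up DMA masks and register the component
 * master.
 */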
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *first_node = NULL;
	struct component_match *match = NULL;

	if (!dev->platform_data) {
		struct device_node *core_node;

		for_each_compatible_node(core_node, NULL, "vivante,gc") {
			if (!of_device_is_available(core_node))
				continue;

			drm_of_component_match_add(dev, &match,
						   component_compare_of, core_node);
		}
	} else {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, component_compare_dev_name, names[i]);
	}

	/*
	 * The PTA and MTLB can have 40 bit base addresses, but
	 * unfortunately, an entry in the MTLB can only point to a
	 * 32 bit base address of a STLB. Moreover, to initialize the
	 * MMU we need a command buffer with a 32 bit address because
	 * without an MMU there is only an identity mapping between
	 * the internal 32 bit addresses and the bus addresses.
	 *
	 * To make things easy, we set the dma_coherent_mask to 32
	 * bit to make sure we are allocating the command buffers and
	 * TLBs in the lower 4 GiB address space.
	 */
	if (dma_set_mask(dev, DMA_BIT_MASK(40)) ||
	    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
		dev_dbg(dev, "No suitable DMA available\n");
		return -ENODEV;
	}

	/*
	 * Apply the same DMA configuration to the virtual etnaviv
	 * device as the GPU we found. This assumes that all Vivante
	 * GPUs in the system share the same DMA constraints.
	 */
	first_node = etnaviv_of_first_available_node();
	if (first_node) {
		of_dma_configure(dev, first_node, true);
		of_node_put(first_node);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static void etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);
}

static struct platform_driver etnaviv_platform_driver = {
	.probe = etnaviv_pdev_probe,
	.remove = etnaviv_pdev_remove,
	.driver = {
		.name = "etnaviv",
	},
};

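/*
 * Helpers to create and destroy the virtual "etnaviv" platform device that
 * the component master binds against. On add failure the allocated device
 * reference is dropped again with platform_device_put().
 */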
static int etnaviv_create_platform_device(const char *name,
					  struct platform_device **ppdev)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);
		return ret;
	}

	*ppdev = pdev;

	return 0;
}

static void etnaviv_destroy_platform_device(struct platform_device **ppdev)
{
	struct platform_device *pdev = *ppdev;

	if (!pdev)
		return;

	platform_device_unregister(pdev);

	*ppdev = NULL;
}

static struct platform_device *etnaviv_drm;

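/*
 * Module init: register the GPU core driver and the etnaviv platform driver,
 * then create the virtual "etnaviv" device when at least one GPU node is
 * enabled in the DT.
 */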
static int __init etnaviv_init(void)
{
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	np = etnaviv_of_first_available_node();
	if (np) {
		of_node_put(np);

		ret = etnaviv_create_platform_device("etnaviv", &etnaviv_drm);
		if (ret)
			goto unregister_platform_driver;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
	etnaviv_destroy_platform_device(&etnaviv_drm);
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");