// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_kms.h"
#include "adreno/adreno_gpu.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
 * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
 * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	12
#define MSM_VERSION_PATCHLEVEL	0

static void msm_deinit_vram(struct drm_device *ddev);

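/*
 * Note: module params are prefixed with the module name, so the default
 * carveout size below can be overridden at boot with e.g. "msm.vram=32m"
 * on the kernel command line (parsed with memparse(), so k/m/g suffixes
 * work).
 */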
static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

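/*
 * These fault attrs are wired up under debugfs (see msm_debugfs.c), so
 * tests can use the generic fault-injection knobs to force GEM
 * allocation or IOVA-mapping failures.
 */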
#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
#endif

static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	/*
	 * Shut down the hw if we're far enough along that things might be on.
	 * If we run this too early, we'll end up panicking in a variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		if (priv->kms)
			drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and clean up any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid the work re-enabling an
	 * irq after uninstall has disabled it.
	 */

	flush_workqueue(priv->wq);

	msm_gem_shrinker_cleanup(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

	if (priv->kms)
		msm_drm_kms_uninit(dev);

	msm_deinit_vram(ddev);

	component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);

	return 0;
}

bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/*
	 * a2xx comes with its own MMU
	 * On other platforms the IOMMU can be specified either for the
	 * MDP/DPU device or for its parent, the MDSS device.
	 */
	return priv->is_a2xx ||
		device_iommu_mapped(dev->dev) ||
		device_iommu_mapped(dev->dev->parent);
}

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire DMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}

static void msm_deinit_vram(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	if (!priv->vram.paddr)
		return;

	drm_mm_takedown(&priv->vram.mm);
	dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
		       attrs);
}

static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	/*
	 * Initialize the LRUs:
	 */
	mutex_init(&priv->lru.lock);
	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->lru.lock);
	fs_reclaim_release(GFP_KERNEL);

	if (priv->kms_init) {
		ret = drmm_mode_config_init(ddev);
		if (ret)
			goto err_destroy_wq;
	}

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_destroy_wq;

	dma_set_max_seg_size(dev, UINT_MAX);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_deinit_vram;

	ret = msm_gem_shrinker_init(ddev);
	if (ret)
		goto err_msm_uninit;

	if (priv->kms_init) {
		ret = msm_drm_kms_init(dev, drv);
		if (ret)
			goto err_msm_uninit;
	} else {
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		ddev->driver_features &= ~DRIVER_MODESET;
		ddev->driver_features &= ~DRIVER_ATOMIC;
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	if (priv->kms_init) {
		drm_kms_helper_poll_init(ddev);
		msm_fbdev_setup(ddev);
	}

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);

	return ret;

err_deinit_vram:
	msm_deinit_vram(ddev);
err_destroy_wq:
	destroy_workqueue(priv->wq);
err_put_dev:
	drm_dev_put(ddev);

	return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

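/*
 * Per-open file state: each drm_file gets its own submitqueues and
 * (where the GPU supports it) a private GPU address space.  The seqno
 * taken from 'ident' below is just a unique identifier for the context,
 * e.g. for debug/fdinfo output.
 */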
static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_file_private_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
			args->param, &args->value, &args->len);
}

static int msm_ioctl_set_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
			args->param, args->value, args->len);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated, as of:
	 *
	 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
	 *
	 * So promote them to WC.
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	if (should_fail(&fail_gem_alloc, args->size))
		return -ENOMEM;

	/* pass the promoted flags, so the UNCACHED->WC fixup takes effect: */
	return msm_gem_new_handle(dev, file, args->size,
			flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->aspace == ctx->aspace)
		return -EOPNOTSUPP;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	return msm_gem_set_iova(obj, ctx->aspace, iova);
}

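/*
 * SET/GET_METADATA let userspace attach a small opaque blob to a GEM
 * object, for example EXT_external_objects-style metadata that needs to
 * travel with a shared buffer.  The kernel just stores and returns the
 * blob, it does not interpret it.
 */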
static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
					   __user void *metadata,
					   u32 metadata_size)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	void *buf;
	int ret;

	/* Impose a moderate upper bound on metadata size: */
	if (metadata_size > 128) {
		return -EOVERFLOW;
	}

	/* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */
	buf = memdup_user(metadata, metadata_size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = msm_gem_lock_interruptible(obj);
	if (ret)
		goto out;

	msm_obj->metadata =
		krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
	msm_obj->metadata_size = metadata_size;
	memcpy(msm_obj->metadata, buf, metadata_size);

	msm_gem_unlock(obj);

out:
	kfree(buf);

	return ret;
}

static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj,
					   __user void *metadata,
					   u32 *metadata_size)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	void *buf;
	int ret, len;

	if (!metadata) {
		/*
		 * Querying the size is inherently racy, but
		 * EXT_external_objects expects the app to confirm
		 * via device and driver UUIDs that the exporter and
		 * importer versions match.  All we can do from the
		 * kernel side is check the length under obj lock
		 * when userspace tries to retrieve the metadata
		 */
		*metadata_size = msm_obj->metadata_size;
		return 0;
	}

	ret = msm_gem_lock_interruptible(obj);
	if (ret)
		return ret;

	/* Avoid copy_to_user() under gem obj lock: */
	len = msm_obj->metadata_size;
	buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL);

	msm_gem_unlock(obj);

	if (*metadata_size < len) {
		ret = -ETOOSMALL;
	} else if (copy_to_user(metadata, buf, len)) {
		ret = -EFAULT;
	} else {
		*metadata_size = len;
	}

	kfree(buf);

	return ret;
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
	case MSM_INFO_GET_FLAGS:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
	case MSM_INFO_SET_METADATA:
	case MSM_INFO_GET_METADATA:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_GET_FLAGS:
		if (obj->import_attach) {
			ret = -EINVAL;
			break;
		}
		/* Hide internal kernel-only flags: */
		args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
		ret = 0;
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -ETOOSMALL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	case MSM_INFO_SET_METADATA:
		ret = msm_ioctl_gem_info_set_metadata(
			obj, u64_to_user_ptr(args->value), args->len);
		break;
	case MSM_INFO_GET_METADATA:
		ret = msm_ioctl_gem_info_get_metadata(
			obj, u64_to_user_ptr(args->value), &args->len);
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}

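/*
 * Returns 0 if the fence signaled (or was already retired, in which case
 * there is nothing left to wait on), -ETIMEDOUT on timeout, or
 * -ERESTARTSYS if the wait was interrupted.
 */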
static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		      ktime_t timeout, uint32_t flags)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	spin_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	spin_unlock(&queue->idr_lock);

	if (!fence)
		return 0;

	if (flags & MSM_WAIT_FENCE_BOOST)
		dma_fence_set_deadline(fence, ktime_get());

	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->flags & ~MSM_WAIT_FENCE_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout), args->flags);

	msm_submitqueue_put(queue);

	return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		return -ENOENT;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
				      args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM, msm_ioctl_set_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};

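/*
 * Emit per-client stats for /proc/<pid>/fdinfo in the common DRM format
 * (Documentation/gpu/drm-usage-stats.rst), consumed by tools like gputop.
 */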
static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, p);

	drm_show_memory_stats(p, file);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = drm_show_fdinfo,
};

static const struct drm_driver msm_driver = {
	.driver_features = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ,
	.open = msm_open,
	.postclose = msm_postclose,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = msm_gem_dumb_map_offset,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
#endif
	.show_fdinfo = msm_show_fdinfo,
	.ioctls = msm_ioctls,
	.num_ioctls = ARRAY_SIZE(msm_ioctls),
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.date = "20130625",
	.major = MSM_VERSION_MAJOR,
	.minor = MSM_VERSION_MINOR,
	.patchlevel = MSM_VERSION_PATCHLEVEL,
};

/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}

/*
 * We don't know what the best binding is to link the gpu with the drm
 * device.  For now, we just hunt for all the possible gpus that we
 * support, and add them as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

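/*
 * Shared probe path: the KMS drivers call this with their kms_init
 * callback, while the headless GPU-only platform driver below passes
 * NULL for both kms_init and kms.
 */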
int msm_drv_probe(struct device *master_dev,
	int (*kms_init)(struct drm_device *dev),
	struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms = kms;
	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_components_mdp(master_dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(master_dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, IOMMUs that can map any
	 * address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}

/*
 * Platform driver:
 * Used only for headless GPU instances
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, NULL, NULL);
}

static void msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
}

static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove_new = msm_pdev_remove,
	.driver = {
		.name = "msm",
	},
};

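/*
 * Register the sub-device drivers before the toplevel "msm" platform
 * driver; the component framework then binds the master once all of the
 * matched components have probed.
 */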
static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
6 */
7
8#include <linux/dma-mapping.h>
9#include <linux/kthread.h>
10#include <linux/uaccess.h>
11#include <uapi/linux/sched/types.h>
12
13#include <drm/drm_drv.h>
14#include <drm/drm_file.h>
15#include <drm/drm_ioctl.h>
16#include <drm/drm_irq.h>
17#include <drm/drm_prime.h>
18#include <drm/drm_of.h>
19#include <drm/drm_vblank.h>
20
21#include "msm_drv.h"
22#include "msm_debugfs.h"
23#include "msm_fence.h"
24#include "msm_gem.h"
25#include "msm_gpu.h"
26#include "msm_kms.h"
27#include "adreno/adreno_gpu.h"
28
29/*
30 * MSM driver version:
31 * - 1.0.0 - initial interface
32 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
33 * - 1.2.0 - adds explicit fence support for submit ioctl
34 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
35 * SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
36 * MSM_GEM_INFO ioctl.
37 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
38 * GEM object's debug name
39 * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
40 * - 1.6.0 - Syncobj support
41 */
42#define MSM_VERSION_MAJOR 1
43#define MSM_VERSION_MINOR 6
44#define MSM_VERSION_PATCHLEVEL 0
45
46static const struct drm_mode_config_funcs mode_config_funcs = {
47 .fb_create = msm_framebuffer_create,
48 .output_poll_changed = drm_fb_helper_output_poll_changed,
49 .atomic_check = drm_atomic_helper_check,
50 .atomic_commit = drm_atomic_helper_commit,
51};
52
53static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
54 .atomic_commit_tail = msm_atomic_commit_tail,
55};
56
57#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
58static bool reglog = false;
59MODULE_PARM_DESC(reglog, "Enable register read/write logging");
60module_param(reglog, bool, 0600);
61#else
62#define reglog 0
63#endif
64
65#ifdef CONFIG_DRM_FBDEV_EMULATION
66static bool fbdev = true;
67MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
68module_param(fbdev, bool, 0600);
69#endif
70
71static char *vram = "16m";
72MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
73module_param(vram, charp, 0);
74
75bool dumpstate = false;
76MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
77module_param(dumpstate, bool, 0600);
78
79static bool modeset = true;
80MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
81module_param(modeset, bool, 0600);
82
83/*
84 * Util/helpers:
85 */
86
87struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
88 const char *name)
89{
90 int i;
91 char n[32];
92
93 snprintf(n, sizeof(n), "%s_clk", name);
94
95 for (i = 0; bulk && i < count; i++) {
96 if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
97 return bulk[i].clk;
98 }
99
100
101 return NULL;
102}
103
104struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
105{
106 struct clk *clk;
107 char name2[32];
108
109 clk = devm_clk_get(&pdev->dev, name);
110 if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
111 return clk;
112
113 snprintf(name2, sizeof(name2), "%s_clk", name);
114
115 clk = devm_clk_get(&pdev->dev, name2);
116 if (!IS_ERR(clk))
117 dev_warn(&pdev->dev, "Using legacy clk name binding. Use "
118 "\"%s\" instead of \"%s\"\n", name, name2);
119
120 return clk;
121}
122
123void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
124 const char *dbgname, bool quiet)
125{
126 struct resource *res;
127 unsigned long size;
128 void __iomem *ptr;
129
130 if (name)
131 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
132 else
133 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
134
135 if (!res) {
136 if (!quiet)
137 DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
138 return ERR_PTR(-EINVAL);
139 }
140
141 size = resource_size(res);
142
143 ptr = devm_ioremap(&pdev->dev, res->start, size);
144 if (!ptr) {
145 if (!quiet)
146 DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
147 return ERR_PTR(-ENOMEM);
148 }
149
150 if (reglog)
151 printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
152
153 return ptr;
154}
155
156void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
157 const char *dbgname)
158{
159 return _msm_ioremap(pdev, name, dbgname, false);
160}
161
162void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
163 const char *dbgname)
164{
165 return _msm_ioremap(pdev, name, dbgname, true);
166}
167
168void msm_writel(u32 data, void __iomem *addr)
169{
170 if (reglog)
171 printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
172 writel(data, addr);
173}
174
175u32 msm_readl(const void __iomem *addr)
176{
177 u32 val = readl(addr);
178 if (reglog)
179 pr_err("IO:R %p %08x\n", addr, val);
180 return val;
181}
182
183struct msm_vblank_work {
184 struct work_struct work;
185 int crtc_id;
186 bool enable;
187 struct msm_drm_private *priv;
188};
189
190static void vblank_ctrl_worker(struct work_struct *work)
191{
192 struct msm_vblank_work *vbl_work = container_of(work,
193 struct msm_vblank_work, work);
194 struct msm_drm_private *priv = vbl_work->priv;
195 struct msm_kms *kms = priv->kms;
196
197 if (vbl_work->enable)
198 kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
199 else
200 kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
201
202 kfree(vbl_work);
203}
204
205static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
206 int crtc_id, bool enable)
207{
208 struct msm_vblank_work *vbl_work;
209
210 vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
211 if (!vbl_work)
212 return -ENOMEM;
213
214 INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
215
216 vbl_work->crtc_id = crtc_id;
217 vbl_work->enable = enable;
218 vbl_work->priv = priv;
219
220 queue_work(priv->wq, &vbl_work->work);
221
222 return 0;
223}
224
225static int msm_drm_uninit(struct device *dev)
226{
227 struct platform_device *pdev = to_platform_device(dev);
228 struct drm_device *ddev = platform_get_drvdata(pdev);
229 struct msm_drm_private *priv = ddev->dev_private;
230 struct msm_kms *kms = priv->kms;
231 struct msm_mdss *mdss = priv->mdss;
232 int i;
233
234 /*
235 * Shutdown the hw if we're far enough along where things might be on.
236 * If we run this too early, we'll end up panicking in any variety of
237 * places. Since we don't register the drm device until late in
238 * msm_drm_init, drm_dev->registered is used as an indicator that the
239 * shutdown will be successful.
240 */
241 if (ddev->registered) {
242 drm_dev_unregister(ddev);
243 drm_atomic_helper_shutdown(ddev);
244 }
245
246 /* We must cancel and cleanup any pending vblank enable/disable
247 * work before drm_irq_uninstall() to avoid work re-enabling an
248 * irq after uninstall has disabled it.
249 */
250
251 flush_workqueue(priv->wq);
252
253 /* clean up event worker threads */
254 for (i = 0; i < priv->num_crtcs; i++) {
255 if (priv->event_thread[i].worker)
256 kthread_destroy_worker(priv->event_thread[i].worker);
257 }
258
259 msm_gem_shrinker_cleanup(ddev);
260
261 drm_kms_helper_poll_fini(ddev);
262
263 msm_perf_debugfs_cleanup(priv);
264 msm_rd_debugfs_cleanup(priv);
265
266#ifdef CONFIG_DRM_FBDEV_EMULATION
267 if (fbdev && priv->fbdev)
268 msm_fbdev_free(ddev);
269#endif
270
271 drm_mode_config_cleanup(ddev);
272
273 pm_runtime_get_sync(dev);
274 drm_irq_uninstall(ddev);
275 pm_runtime_put_sync(dev);
276
277 if (kms && kms->funcs)
278 kms->funcs->destroy(kms);
279
280 if (priv->vram.paddr) {
281 unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
282 drm_mm_takedown(&priv->vram.mm);
283 dma_free_attrs(dev, priv->vram.size, NULL,
284 priv->vram.paddr, attrs);
285 }
286
287 component_unbind_all(dev, ddev);
288
289 if (mdss && mdss->funcs)
290 mdss->funcs->destroy(ddev);
291
292 ddev->dev_private = NULL;
293 drm_dev_put(ddev);
294
295 destroy_workqueue(priv->wq);
296 kfree(priv);
297
298 return 0;
299}
300
301#define KMS_MDP4 4
302#define KMS_MDP5 5
303#define KMS_DPU 3
304
305static int get_mdp_ver(struct platform_device *pdev)
306{
307 struct device *dev = &pdev->dev;
308
309 return (int) (unsigned long) of_device_get_match_data(dev);
310}
311
312#include <linux/of_address.h>
313
314bool msm_use_mmu(struct drm_device *dev)
315{
316 struct msm_drm_private *priv = dev->dev_private;
317
318 /* a2xx comes with its own MMU */
319 return priv->is_a2xx || iommu_present(&platform_bus_type);
320}
321
322static int msm_init_vram(struct drm_device *dev)
323{
324 struct msm_drm_private *priv = dev->dev_private;
325 struct device_node *node;
326 unsigned long size = 0;
327 int ret = 0;
328
329 /* In the device-tree world, we could have a 'memory-region'
330 * phandle, which gives us a link to our "vram". Allocating
331 * is all nicely abstracted behind the dma api, but we need
332 * to know the entire size to allocate it all in one go. There
333 * are two cases:
334 * 1) device with no IOMMU, in which case we need exclusive
335 * access to a VRAM carveout big enough for all gpu
336 * buffers
337 * 2) device with IOMMU, but where the bootloader puts up
338 * a splash screen. In this case, the VRAM carveout
339 * need only be large enough for fbdev fb. But we need
340 * exclusive access to the buffer to avoid the kernel
341 * using those pages for other purposes (which appears
342 * as corruption on screen before we have a chance to
343 * load and do initial modeset)
344 */
345
346 node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
347 if (node) {
348 struct resource r;
349 ret = of_address_to_resource(node, 0, &r);
350 of_node_put(node);
351 if (ret)
352 return ret;
353 size = r.end - r.start;
354 DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
355
356 /* if we have no IOMMU, then we need to use carveout allocator.
357 * Grab the entire CMA chunk carved out in early startup in
358 * mach-msm:
359 */
360 } else if (!msm_use_mmu(dev)) {
361 DRM_INFO("using %s VRAM carveout\n", vram);
362 size = memparse(vram, NULL);
363 }
364
365 if (size) {
366 unsigned long attrs = 0;
367 void *p;
368
369 priv->vram.size = size;
370
371 drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
372 spin_lock_init(&priv->vram.lock);
373
374 attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
375 attrs |= DMA_ATTR_WRITE_COMBINE;
376
377 /* note that for no-kernel-mapping, the vaddr returned
378 * is bogus, but non-null if allocation succeeded:
379 */
380 p = dma_alloc_attrs(dev->dev, size,
381 &priv->vram.paddr, GFP_KERNEL, attrs);
382 if (!p) {
383 DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
384 priv->vram.paddr = 0;
385 return -ENOMEM;
386 }
387
388 DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
389 (uint32_t)priv->vram.paddr,
390 (uint32_t)(priv->vram.paddr + size));
391 }
392
393 return ret;
394}
395
396static int msm_drm_init(struct device *dev, struct drm_driver *drv)
397{
398 struct platform_device *pdev = to_platform_device(dev);
399 struct drm_device *ddev;
400 struct msm_drm_private *priv;
401 struct msm_kms *kms;
402 struct msm_mdss *mdss;
403 int ret, i;
404
405 ddev = drm_dev_alloc(drv, dev);
406 if (IS_ERR(ddev)) {
407 DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
408 return PTR_ERR(ddev);
409 }
410
411 platform_set_drvdata(pdev, ddev);
412
413 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
414 if (!priv) {
415 ret = -ENOMEM;
416 goto err_put_drm_dev;
417 }
418
419 ddev->dev_private = priv;
420 priv->dev = ddev;
421
422 switch (get_mdp_ver(pdev)) {
423 case KMS_MDP5:
424 ret = mdp5_mdss_init(ddev);
425 break;
426 case KMS_DPU:
427 ret = dpu_mdss_init(ddev);
428 break;
429 default:
430 ret = 0;
431 break;
432 }
433 if (ret)
434 goto err_free_priv;
435
436 mdss = priv->mdss;
437
438 priv->wq = alloc_ordered_workqueue("msm", 0);
439
440 INIT_WORK(&priv->free_work, msm_gem_free_work);
441 init_llist_head(&priv->free_list);
442
443 INIT_LIST_HEAD(&priv->inactive_list);
444
445 drm_mode_config_init(ddev);
446
447 /* Bind all our sub-components: */
448 ret = component_bind_all(dev, ddev);
449 if (ret)
450 goto err_destroy_mdss;
451
452 ret = msm_init_vram(ddev);
453 if (ret)
454 goto err_msm_uninit;
455
456 if (!dev->dma_parms) {
457 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
458 GFP_KERNEL);
459 if (!dev->dma_parms) {
460 ret = -ENOMEM;
461 goto err_msm_uninit;
462 }
463 }
464 dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
465
466 msm_gem_shrinker_init(ddev);
467
468 switch (get_mdp_ver(pdev)) {
469 case KMS_MDP4:
470 kms = mdp4_kms_init(ddev);
471 priv->kms = kms;
472 break;
473 case KMS_MDP5:
474 kms = mdp5_kms_init(ddev);
475 break;
476 case KMS_DPU:
477 kms = dpu_kms_init(ddev);
478 priv->kms = kms;
479 break;
480 default:
481 /* valid only for the dummy headless case, where of_node=NULL */
482 WARN_ON(dev->of_node);
483 kms = NULL;
484 break;
485 }
486
487 if (IS_ERR(kms)) {
488 DRM_DEV_ERROR(dev, "failed to load kms\n");
489 ret = PTR_ERR(kms);
490 priv->kms = NULL;
491 goto err_msm_uninit;
492 }
493
494 /* Enable normalization of plane zpos */
495 ddev->mode_config.normalize_zpos = true;
496
497 if (kms) {
498 kms->dev = ddev;
499 ret = kms->funcs->hw_init(kms);
500 if (ret) {
501 DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
502 goto err_msm_uninit;
503 }
504 }
505
506 ddev->mode_config.funcs = &mode_config_funcs;
507 ddev->mode_config.helper_private = &mode_config_helper_funcs;
508
509 for (i = 0; i < priv->num_crtcs; i++) {
510 /* initialize event thread */
511 priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
512 priv->event_thread[i].dev = ddev;
513 priv->event_thread[i].worker = kthread_create_worker(0,
514 "crtc_event:%d", priv->event_thread[i].crtc_id);
515 if (IS_ERR(priv->event_thread[i].worker)) {
516 DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
517 goto err_msm_uninit;
518 }
519
520 sched_set_fifo(priv->event_thread[i].worker->task);
521 }
522
523 ret = drm_vblank_init(ddev, priv->num_crtcs);
524 if (ret < 0) {
525 DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
526 goto err_msm_uninit;
527 }
528
529 if (kms) {
530 pm_runtime_get_sync(dev);
531 ret = drm_irq_install(ddev, kms->irq);
532 pm_runtime_put_sync(dev);
533 if (ret < 0) {
534 DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
535 goto err_msm_uninit;
536 }
537 }
538
539 ret = drm_dev_register(ddev, 0);
540 if (ret)
541 goto err_msm_uninit;
542
543 drm_mode_config_reset(ddev);
544
545#ifdef CONFIG_DRM_FBDEV_EMULATION
546 if (kms && fbdev)
547 priv->fbdev = msm_fbdev_init(ddev);
548#endif
549
550 ret = msm_debugfs_late_init(ddev);
551 if (ret)
552 goto err_msm_uninit;
553
554 drm_kms_helper_poll_init(ddev);
555
556 return 0;
557
558err_msm_uninit:
559 msm_drm_uninit(dev);
560 return ret;
561err_destroy_mdss:
562 if (mdss && mdss->funcs)
563 mdss->funcs->destroy(ddev);
564err_free_priv:
565 kfree(priv);
566err_put_drm_dev:
567 drm_dev_put(ddev);
568 return ret;
569}
570
571/*
572 * DRM operations:
573 */
574
575static void load_gpu(struct drm_device *dev)
576{
577 static DEFINE_MUTEX(init_lock);
578 struct msm_drm_private *priv = dev->dev_private;
579
580 mutex_lock(&init_lock);
581
582 if (!priv->gpu)
583 priv->gpu = adreno_load_gpu(dev);
584
585 mutex_unlock(&init_lock);
586}
587
588static int context_init(struct drm_device *dev, struct drm_file *file)
589{
590 struct msm_drm_private *priv = dev->dev_private;
591 struct msm_file_private *ctx;
592
593 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
594 if (!ctx)
595 return -ENOMEM;
596
597 msm_submitqueue_init(dev, ctx);
598
599 ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
600 file->driver_priv = ctx;
601
602 return 0;
603}
604
605static int msm_open(struct drm_device *dev, struct drm_file *file)
606{
607 /* For now, load gpu on open.. to avoid the requirement of having
608 * firmware in the initrd.
609 */
610 load_gpu(dev);
611
612 return context_init(dev, file);
613}
614
615static void context_close(struct msm_file_private *ctx)
616{
617 msm_submitqueue_close(ctx);
618 kfree(ctx);
619}
620
621static void msm_postclose(struct drm_device *dev, struct drm_file *file)
622{
623 struct msm_drm_private *priv = dev->dev_private;
624 struct msm_file_private *ctx = file->driver_priv;
625
626 mutex_lock(&dev->struct_mutex);
627 if (ctx == priv->lastctx)
628 priv->lastctx = NULL;
629 mutex_unlock(&dev->struct_mutex);
630
631 context_close(ctx);
632}
633
634static irqreturn_t msm_irq(int irq, void *arg)
635{
636 struct drm_device *dev = arg;
637 struct msm_drm_private *priv = dev->dev_private;
638 struct msm_kms *kms = priv->kms;
639 BUG_ON(!kms);
640 return kms->funcs->irq(kms);
641}
642
643static void msm_irq_preinstall(struct drm_device *dev)
644{
645 struct msm_drm_private *priv = dev->dev_private;
646 struct msm_kms *kms = priv->kms;
647 BUG_ON(!kms);
648 kms->funcs->irq_preinstall(kms);
649}
650
651static int msm_irq_postinstall(struct drm_device *dev)
652{
653 struct msm_drm_private *priv = dev->dev_private;
654 struct msm_kms *kms = priv->kms;
655 BUG_ON(!kms);
656
657 if (kms->funcs->irq_postinstall)
658 return kms->funcs->irq_postinstall(kms);
659
660 return 0;
661}
662
663static void msm_irq_uninstall(struct drm_device *dev)
664{
665 struct msm_drm_private *priv = dev->dev_private;
666 struct msm_kms *kms = priv->kms;
667 BUG_ON(!kms);
668 kms->funcs->irq_uninstall(kms);
669}
670
671int msm_crtc_enable_vblank(struct drm_crtc *crtc)
672{
673 struct drm_device *dev = crtc->dev;
674 unsigned int pipe = crtc->index;
675 struct msm_drm_private *priv = dev->dev_private;
676 struct msm_kms *kms = priv->kms;
677 if (!kms)
678 return -ENXIO;
679 DBG("dev=%p, crtc=%u", dev, pipe);
680 return vblank_ctrl_queue_work(priv, pipe, true);
681}
682
683void msm_crtc_disable_vblank(struct drm_crtc *crtc)
684{
685 struct drm_device *dev = crtc->dev;
686 unsigned int pipe = crtc->index;
687 struct msm_drm_private *priv = dev->dev_private;
688 struct msm_kms *kms = priv->kms;
689 if (!kms)
690 return;
691 DBG("dev=%p, crtc=%u", dev, pipe);
692 vblank_ctrl_queue_work(priv, pipe, false);
693}
694
695/*
696 * DRM ioctls:
697 */
698
699static int msm_ioctl_get_param(struct drm_device *dev, void *data,
700 struct drm_file *file)
701{
702 struct msm_drm_private *priv = dev->dev_private;
703 struct drm_msm_param *args = data;
704 struct msm_gpu *gpu;
705
706 /* for now, we just have 3d pipe.. eventually this would need to
707 * be more clever to dispatch to appropriate gpu module:
708 */
709 if (args->pipe != MSM_PIPE_3D0)
710 return -EINVAL;
711
712 gpu = priv->gpu;
713
714 if (!gpu)
715 return -ENXIO;
716
717 return gpu->funcs->get_param(gpu, args->param, &args->value);
718}
719
720static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
721 struct drm_file *file)
722{
723 struct drm_msm_gem_new *args = data;
724
725 if (args->flags & ~MSM_BO_FLAGS) {
726 DRM_ERROR("invalid flags: %08x\n", args->flags);
727 return -EINVAL;
728 }
729
730 return msm_gem_new_handle(dev, file, args->size,
731 args->flags, &args->handle, NULL);
732}
733
734static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
735{
736 return ktime_set(timeout.tv_sec, timeout.tv_nsec);
737}
738
739static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
740 struct drm_file *file)
741{
742 struct drm_msm_gem_cpu_prep *args = data;
743 struct drm_gem_object *obj;
744 ktime_t timeout = to_ktime(args->timeout);
745 int ret;
746
747 if (args->op & ~MSM_PREP_FLAGS) {
748 DRM_ERROR("invalid op: %08x\n", args->op);
749 return -EINVAL;
750 }
751
752 obj = drm_gem_object_lookup(file, args->handle);
753 if (!obj)
754 return -ENOENT;
755
756 ret = msm_gem_cpu_prep(obj, args->op, &timeout);
757
758 drm_gem_object_put(obj);
759
760 return ret;
761}
762
763static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
764 struct drm_file *file)
765{
766 struct drm_msm_gem_cpu_fini *args = data;
767 struct drm_gem_object *obj;
768 int ret;
769
770 obj = drm_gem_object_lookup(file, args->handle);
771 if (!obj)
772 return -ENOENT;
773
774 ret = msm_gem_cpu_fini(obj);
775
776 drm_gem_object_put(obj);
777
778 return ret;
779}
780
781static int msm_ioctl_gem_info_iova(struct drm_device *dev,
782 struct drm_gem_object *obj, uint64_t *iova)
783{
784 struct msm_drm_private *priv = dev->dev_private;
785
786 if (!priv->gpu)
787 return -EINVAL;
788
789 /*
790 * Don't pin the memory here - just get an address so that userspace can
791 * be productive
792 */
793 return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
794}
795
796static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
797 struct drm_file *file)
798{
799 struct drm_msm_gem_info *args = data;
800 struct drm_gem_object *obj;
801 struct msm_gem_object *msm_obj;
802 int i, ret = 0;
803
804 if (args->pad)
805 return -EINVAL;
806
807 switch (args->info) {
808 case MSM_INFO_GET_OFFSET:
809 case MSM_INFO_GET_IOVA:
810 /* value returned as immediate, not pointer, so len==0: */
811 if (args->len)
812 return -EINVAL;
813 break;
814 case MSM_INFO_SET_NAME:
815 case MSM_INFO_GET_NAME:
816 break;
817 default:
818 return -EINVAL;
819 }
820
821 obj = drm_gem_object_lookup(file, args->handle);
822 if (!obj)
823 return -ENOENT;
824
825 msm_obj = to_msm_bo(obj);
826
827 switch (args->info) {
828 case MSM_INFO_GET_OFFSET:
829 args->value = msm_gem_mmap_offset(obj);
830 break;
831 case MSM_INFO_GET_IOVA:
832 ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
833 break;
834 case MSM_INFO_SET_NAME:
835 /* length check should leave room for terminating null: */
836 if (args->len >= sizeof(msm_obj->name)) {
837 ret = -EINVAL;
838 break;
839 }
840 if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
841 args->len)) {
842 msm_obj->name[0] = '\0';
843 ret = -EFAULT;
844 break;
845 }
846 msm_obj->name[args->len] = '\0';
847 for (i = 0; i < args->len; i++) {
848 if (!isprint(msm_obj->name[i])) {
849 msm_obj->name[i] = '\0';
850 break;
851 }
852 }
853 break;
854 case MSM_INFO_GET_NAME:
855 if (args->value && (args->len < strlen(msm_obj->name))) {
856 ret = -EINVAL;
857 break;
858 }
859 args->len = strlen(msm_obj->name);
860 if (args->value) {
861 if (copy_to_user(u64_to_user_ptr(args->value),
862 msm_obj->name, args->len))
863 ret = -EFAULT;
864 }
865 break;
866 }
867
868 drm_gem_object_put(obj);
869
870 return ret;
871}
872
873static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
874 struct drm_file *file)
875{
876 struct msm_drm_private *priv = dev->dev_private;
877 struct drm_msm_wait_fence *args = data;
878 ktime_t timeout = to_ktime(args->timeout);
879 struct msm_gpu_submitqueue *queue;
880 struct msm_gpu *gpu = priv->gpu;
881 int ret;
882
883 if (args->pad) {
884 DRM_ERROR("invalid pad: %08x\n", args->pad);
885 return -EINVAL;
886 }
887
888 if (!gpu)
889 return 0;
890
891 queue = msm_submitqueue_get(file->driver_priv, args->queueid);
892 if (!queue)
893 return -ENOENT;
894
895 ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
896 true);
897
898 msm_submitqueue_put(queue);
899 return ret;
900}
901
902static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
903 struct drm_file *file)
904{
905 struct drm_msm_gem_madvise *args = data;
906 struct drm_gem_object *obj;
907 int ret;
908
909 switch (args->madv) {
910 case MSM_MADV_DONTNEED:
911 case MSM_MADV_WILLNEED:
912 break;
913 default:
914 return -EINVAL;
915 }
916
917 ret = mutex_lock_interruptible(&dev->struct_mutex);
918 if (ret)
919 return ret;
920
921 obj = drm_gem_object_lookup(file, args->handle);
922 if (!obj) {
923 ret = -ENOENT;
924 goto unlock;
925 }
926
927 ret = msm_gem_madvise(obj, args->madv);
928 if (ret >= 0) {
929 args->retained = ret;
930 ret = 0;
931 }
932
933 drm_gem_object_put_locked(obj);
934
935unlock:
936 mutex_unlock(&dev->struct_mutex);
937 return ret;
938}
939
940
941static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
942 struct drm_file *file)
943{
944 struct drm_msm_submitqueue *args = data;
945
946 if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
947 return -EINVAL;
948
949 return msm_submitqueue_create(dev, file->driver_priv, args->prio,
950 args->flags, &args->id);
951}
952
953static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
954 struct drm_file *file)
955{
956 return msm_submitqueue_query(dev, file->driver_priv, data);
957}
958
959static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
960 struct drm_file *file)
961{
962 u32 id = *(u32 *) data;
963
964 return msm_submitqueue_remove(file->driver_priv, id);
965}
966
967static const struct drm_ioctl_desc msm_ioctls[] = {
968 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
969 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
970 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
971 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
972 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
973 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
974 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
975 DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
976 DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
977 DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
978 DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
979};
980
981static const struct vm_operations_struct vm_ops = {
982 .fault = msm_gem_fault,
983 .open = drm_gem_vm_open,
984 .close = drm_gem_vm_close,
985};
986
987static const struct file_operations fops = {
988 .owner = THIS_MODULE,
989 .open = drm_open,
990 .release = drm_release,
991 .unlocked_ioctl = drm_ioctl,
992 .compat_ioctl = drm_compat_ioctl,
993 .poll = drm_poll,
994 .read = drm_read,
995 .llseek = no_llseek,
996 .mmap = msm_gem_mmap,
997};
998
static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.gem_free_object_unlocked = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

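/*
 * Runtime PM only gates the MDSS top level via mdss->funcs->enable() /
 * ->disable(); on platforms without an MDSS wrapper (mdss == NULL, e.g.
 * MDP4) these callbacks are no-ops.
 */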
static int __maybe_unused msm_runtime_suspend(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->disable(mdss);

	return 0;
}

static int __maybe_unused msm_runtime_resume(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->enable(mdss);

	return 0;
}

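/*
 * For system sleep, bail out early if the device is already runtime
 * suspended: the MDSS is then already off and disabling it again (or
 * enabling it on resume) would unbalance the enable state.
 */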
static int __maybe_unused msm_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return msm_runtime_suspend(dev);
}

static int __maybe_unused msm_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return msm_runtime_resume(dev);
}

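/*
 * prepare()/complete() bracket the entire system-sleep transition, so the
 * display pipeline is quiesced (and its atomic state saved) before devices
 * start suspending, and restored only after everything has resumed.
 */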
static int __maybe_unused msm_pm_prepare(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(priv->dev);
}

static void __maybe_unused msm_pm_complete(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);

	drm_mode_config_helper_resume(priv->dev);
}

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: this duplicates code found in exynos, imx and probably other
 * drivers, so there is room for a shared helper.
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * On MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * On MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add.
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}

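/*
 * For reference, the endpoints walked above come from an OF graph roughly
 * like the following (an illustrative sketch only, with made-up labels,
 * not taken from a real board file):
 *
 *	mdp {
 *		ports {
 *			port@0 {
 *				mdp_out: endpoint {
 *					remote-endpoint = <&dsi0_in>;
 *				};
 *			};
 *		};
 *	};
 */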
static int compare_name_mdp(struct device *dev, void *data)
{
	return (strstr(dev_name(dev), "mdp") != NULL);
}

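/*
 * Build the component match list for the display side. On MDSS based
 * platforms the child devices must be populated first so that the MDP5/DPU
 * device exists and can be found by name.
 */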
static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") ||
	    of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);

		/* drop the reference taken by device_find_child() */
		put_device(mdp_dev);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}

/*
 * We don't know what the best binding is to link the GPU with the DRM
 * device. For now, we just hunt for all the possible GPUs that we support,
 * and add them as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	/* only the first node matching msm_gpu_match is considered */
	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, compare_of, np);

	/* drop our reference; the match (if added) holds its own */
	of_node_put(np);

	return 0;
}

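/*
 * The component framework calls bind() once every device on the match list
 * has probed and bound, and unbind() as soon as any of them goes away, so
 * the full DRM device only exists while all display/GPU components are
 * present.
 */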
static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	if (get_mdp_ver(pdev)) {
		ret = add_display_components(&pdev->dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		goto fail;

	/*
	 * On all devices that I am aware of, IOMMUs which can map any
	 * address the CPU can see are used, so don't constrain the DMA mask:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		goto fail;

	ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
	if (ret)
		goto fail;

	return 0;

fail:
	of_platform_depopulate(&pdev->dev);
	return ret;
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static void msm_pdev_shutdown(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *drm = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_atomic_helper_shutdown(drm);
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
	{ .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.shutdown = msm_pdev_shutdown,
	.driver = {
		.name = "msm",
		.of_match_table = dt_match,
		.pm = &msm_pm_ops,
	},
};

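/*
 * Register the sub-device drivers (mdp, dpu, dsi, edp, hdmi, adreno) before
 * the toplevel platform driver, so their devices are ready to bind as
 * components when the master probes. On exit the toplevel driver goes away
 * first, so the component master tears down before its providers.
 */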
static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");