/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/component.h>

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
#define DRIVER_DATE "20110530"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0

struct exynos_atomic_commit {
	struct work_struct work;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	u32 crtcs;
};

static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

		if (!crtc->state->enable)
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			continue;

		exynos_drm_crtc_wait_pending_update(exynos_crtc);
		drm_crtc_vblank_put(crtc);
	}
}

static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct exynos_drm_private *priv = dev->dev_private;
	struct drm_atomic_state *state = commit->state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int i;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/*
	 * Exynos cannot update planes while CRTCs and encoders are disabled:
	 * its update routines, especially for FIMD, require the clocks to be
	 * enabled. So the modeset operations must be handled *before* the
	 * commit_planes() step, so that the relevant clocks are always
	 * enabled when the plane update is performed.
	 */

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

		atomic_set(&exynos_crtc->pending_update, 0);
	}

	for_each_plane_in_state(state, plane, plane_state, i) {
		struct exynos_drm_crtc *exynos_crtc =
						to_exynos_crtc(plane->crtc);

		if (!plane->crtc)
			continue;

		atomic_inc(&exynos_crtc->pending_update);
	}

	drm_atomic_helper_commit_planes(dev, state, false);

	exynos_atomic_wait_for_commit(state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);

	spin_lock(&priv->lock);
	priv->pending &= ~commit->crtcs;
	spin_unlock(&priv->lock);

	wake_up_all(&priv->wait);

	kfree(commit);
}

static void exynos_drm_atomic_work(struct work_struct *work)
{
	struct exynos_atomic_commit *commit = container_of(work,
				struct exynos_atomic_commit, work);

	exynos_atomic_commit_complete(commit);
}

static struct device *exynos_drm_get_dma_device(void);

static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct exynos_drm_private *private;
	struct drm_encoder *encoder;
	unsigned int clone_mask;
	int cnt, ret;

	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;

	init_waitqueue_head(&private->wait);
	spin_lock_init(&private->lock);

	dev_set_drvdata(dev->dev, dev);
	dev->dev_private = (void *)private;

	/* the first real CRTC device is used for all dma mapping operations */
	private->dma_dev = exynos_drm_get_dma_device();
	if (!private->dma_dev) {
		DRM_ERROR("no device found for DMA mapping operations.\n");
		ret = -ENODEV;
		goto err_free_private;
	}
	DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
		 dev_name(private->dma_dev));

	/*
	 * Create a mapping to manage the IOMMU table and store a pointer to
	 * it in iommu_mapping of the private data. This iommu_mapping can
	 * also be used to check whether the IOMMU is supported or not.
	 */
	ret = drm_create_iommu_mapping(dev);
	if (ret < 0) {
		DRM_ERROR("failed to create iommu mapping.\n");
		goto err_free_private;
	}

	drm_mode_config_init(dev);

	exynos_drm_mode_config_init(dev);

	/* setup possible_clones. */
	cnt = 0;
	clone_mask = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		clone_mask |= (1 << (cnt++));

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		encoder->possible_clones = clone_mask;

	platform_set_drvdata(dev->platformdev, dev);

	/* Try to bind all sub drivers. */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		goto err_mode_config_cleanup;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_unbind_all;

	/* Probe non kms sub drivers and virtual display driver. */
	ret = exynos_drm_device_subdrv_probe(dev);
	if (ret)
		goto err_cleanup_vblank;

	drm_mode_config_reset(dev);

	/*
	 * Enable DRM IRQ mode.
	 * - with irq_enabled = true, we can use the vblank feature.
	 *
	 * Note that we do not use the DRM core's IRQ handler but each
	 * driver's own handler instead, because the DRM framework supports
	 * only one IRQ handler.
	 */
	dev->irq_enabled = true;

	/*
	 * With vblank_disable_allowed = true, the vblank interrupt will be
	 * disabled by the DRM timer once the current process gives up
	 * ownership of the vblank event (after drm_vblank_put() is called).
	 */
	dev->vblank_disable_allowed = true;

	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(dev);

	/* force connectors detection */
	drm_helper_hpd_irq_event(dev);

	return 0;

err_cleanup_vblank:
	drm_vblank_cleanup(dev);
err_unbind_all:
	component_unbind_all(dev->dev, dev);
err_mode_config_cleanup:
	drm_mode_config_cleanup(dev);
	drm_release_iommu_mapping(dev);
err_free_private:
	kfree(private);

	return ret;
}

static int exynos_drm_unload(struct drm_device *dev)
{
	exynos_drm_device_subdrv_remove(dev);

	exynos_drm_fbdev_fini(dev);
	drm_kms_helper_poll_fini(dev);

	drm_vblank_cleanup(dev);
	component_unbind_all(dev->dev, dev);
	drm_mode_config_cleanup(dev);
	drm_release_iommu_mapping(dev);

	kfree(dev->dev_private);
	dev->dev_private = NULL;

	return 0;
}

static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
{
	bool pending;

	spin_lock(&priv->lock);
	pending = priv->pending & crtcs;
	spin_unlock(&priv->lock);

	return pending;
}

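/*
 * Custom atomic commit: prepare the planes, serialize against earlier
 * commits touching the same CRTCs via the priv->pending bitmask, swap in
 * the new state, then apply it either from a worker (async) or synchronously.
 */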
int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
			 bool async)
{
	struct exynos_drm_private *priv = dev->dev_private;
	struct exynos_atomic_commit *commit;
	int i, ret;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit)
		return -ENOMEM;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		kfree(commit);
		return ret;
	}

	/* This is the point of no return */

	INIT_WORK(&commit->work, exynos_drm_atomic_work);
	commit->dev = dev;
	commit->state = state;

	/* Wait until all affected CRTCs have completed previous commits and
	 * mark them as pending.
	 */
	for (i = 0; i < dev->mode_config.num_crtc; ++i) {
		if (state->crtcs[i])
			commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
	}

	wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));

	spin_lock(&priv->lock);
	priv->pending |= commit->crtcs;
	spin_unlock(&priv->lock);

	drm_atomic_helper_swap_state(dev, state);

	if (async)
		schedule_work(&commit->work);
	else
		exynos_atomic_commit_complete(commit);

	return 0;
}

static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv;
	int ret;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	ret = exynos_drm_subdrv_open(dev, file);
	if (ret)
		goto err_file_priv_free;

	return ret;

err_file_priv_free:
	kfree(file_priv);
	file->driver_priv = NULL;
	return ret;
}

static void exynos_drm_preclose(struct drm_device *dev,
				struct drm_file *file)
{
	struct drm_crtc *crtc;

	exynos_drm_subdrv_close(dev, file);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		exynos_drm_crtc_cancel_page_flip(crtc, file);
}

static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
	kfree(file->driver_priv);
	file->driver_priv = NULL;
}

static void exynos_drm_lastclose(struct drm_device *dev)
{
	exynos_drm_fbdev_restore_mode(dev);
}

static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.fault = exynos_drm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_ioctl_desc exynos_ioctls[] = {
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
			DRM_AUTH),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
			DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct file_operations exynos_drm_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = exynos_drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.release = drm_release,
};

static struct drm_driver exynos_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
			   | DRIVER_ATOMIC | DRIVER_RENDER,
	.load = exynos_drm_load,
	.unload = exynos_drm_unload,
	.open = exynos_drm_open,
	.preclose = exynos_drm_preclose,
	.lastclose = exynos_drm_lastclose,
	.postclose = exynos_drm_postclose,
	.set_busid = drm_platform_set_busid,
	.get_vblank_counter = drm_vblank_no_hw_counter,
	.enable_vblank = exynos_drm_crtc_enable_vblank,
	.disable_vblank = exynos_drm_crtc_disable_vblank,
	.gem_free_object = exynos_drm_gem_free_object,
	.gem_vm_ops = &exynos_drm_gem_vm_ops,
	.dumb_create = exynos_drm_gem_dumb_create,
	.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
	.gem_prime_vmap = exynos_drm_gem_prime_vmap,
	.gem_prime_vunmap = exynos_drm_gem_prime_vunmap,
	.ioctls = exynos_ioctls,
	.num_ioctls = ARRAY_SIZE(exynos_ioctls),
	.fops = &exynos_drm_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		int old_dpms = connector->dpms;

		if (connector->funcs->dpms)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);

		/* Set the old mode back to the connector for resume */
		connector->dpms = old_dpms;
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}

static int exynos_drm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		if (connector->funcs->dpms) {
			int dpms = connector->dpms;

			connector->dpms = DRM_MODE_DPMS_OFF;
			connector->funcs->dpms(connector, dpms);
		}
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}
#endif

static const struct dev_pm_ops exynos_drm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
};

/* forward declaration */
static struct platform_driver exynos_drm_platform_driver;

struct exynos_drm_driver_info {
	struct platform_driver *driver;
	unsigned int flags;
};

#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */
#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */
#define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */

#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)

/*
 * Connector drivers should not be placed before their associated CRTC
 * drivers, because a connector requires the pipe number of its CRTC during
 * initialization.
 */
static struct exynos_drm_driver_info exynos_drm_drivers[] = {
	{
		DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(vidi_driver, CONFIG_DRM_EXYNOS_VIDI),
		DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
	}, {
		DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
	}, {
		DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
	}, {
		DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
	}, {
		DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
	}, {
		DRV_PTR(ipp_driver, CONFIG_DRM_EXYNOS_IPP),
		DRM_VIRTUAL_DEVICE
	}, {
		&exynos_drm_platform_driver,
		DRM_VIRTUAL_DEVICE
	}
};

static int compare_dev(struct device *dev, void *data)
{
	return dev == (struct device *)data;
}

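/*
 * Build the component match list: for every component-aware Exynos driver,
 * add each platform device on the bus that matches that driver.
 */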
static struct component_match *exynos_drm_match_add(struct device *dev)
{
	struct component_match *match = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *p = NULL, *d;

		if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
			continue;

		while ((d = bus_find_device(&platform_bus_type, p,
					    &info->driver->driver,
					    (void *)platform_bus_type.match))) {
			put_device(p);
			component_match_add(dev, &match, compare_dev, d);
			p = d;
		}
		put_device(p);
	}

	return match ?: ERR_PTR(-ENODEV);
}

static int exynos_drm_bind(struct device *dev)
{
	return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
}

static void exynos_drm_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}

static const struct component_master_ops exynos_drm_ops = {
	.bind = exynos_drm_bind,
	.unbind = exynos_drm_unbind,
};

static int exynos_drm_platform_probe(struct platform_device *pdev)
{
	struct component_match *match;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);

	match = exynos_drm_match_add(&pdev->dev);
	if (IS_ERR(match))
		return PTR_ERR(match);

	return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
					       match);
}

static int exynos_drm_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &exynos_drm_ops);
	return 0;
}

static struct platform_driver exynos_drm_platform_driver = {
	.probe = exynos_drm_platform_probe,
	.remove = exynos_drm_platform_remove,
	.driver = {
		.name = "exynos-drm",
		.pm = &exynos_drm_pm_ops,
	},
};

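/*
 * Return the first platform device matching a driver flagged DRM_DMA_DEVICE;
 * this is the device used for all DMA mapping operations (see exynos_drm_load).
 */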
static struct device *exynos_drm_get_dma_device(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *dev;

		if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
			continue;

		while ((dev = bus_find_device(&platform_bus_type, NULL,
					      &info->driver->driver,
					      (void *)platform_bus_type.match))) {
			put_device(dev);
			return dev;
		}
	}
	return NULL;
}

static void exynos_drm_unregister_devices(void)
{
	int i;

	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *dev;

		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
			continue;

		while ((dev = bus_find_device(&platform_bus_type, NULL,
					      &info->driver->driver,
					      (void *)platform_bus_type.match))) {
			put_device(dev);
			platform_device_unregister(to_platform_device(dev));
		}
	}
}

static int exynos_drm_register_devices(void)
{
	struct platform_device *pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
			continue;

		pdev = platform_device_register_simple(
					info->driver->driver.name, -1, NULL, 0);
		if (IS_ERR(pdev))
			goto fail;
	}

	return 0;
fail:
	exynos_drm_unregister_devices();
	return PTR_ERR(pdev);
}

static void exynos_drm_unregister_drivers(void)
{
	int i;

	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver)
			continue;

		platform_driver_unregister(info->driver);
	}
}

static int exynos_drm_register_drivers(void)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver)
			continue;

		ret = platform_driver_register(info->driver);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	exynos_drm_unregister_drivers();
	return ret;
}

static int exynos_drm_init(void)
{
	int ret;

	ret = exynos_drm_register_devices();
	if (ret)
		return ret;

	ret = exynos_drm_register_drivers();
	if (ret)
		goto err_unregister_pdevs;

	return 0;

err_unregister_pdevs:
	exynos_drm_unregister_devices();

	return ret;
}

static void exynos_drm_exit(void)
{
	exynos_drm_unregister_drivers();
	exynos_drm_unregister_devices();
}

module_init(exynos_drm_init);
module_exit(exynos_drm_exit);

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM Driver");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 */

#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"

#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
#define DRIVER_DATE "20180330"

/*
 * Interface history:
 *
 * 1.0 - Original version
 * 1.1 - Upgrade IPP driver to version 2.0
 */
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1

static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv;
	int ret;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	ret = g2d_open(dev, file);
	if (ret)
		goto err_file_priv_free;

	return ret;

err_file_priv_free:
	kfree(file_priv);
	file->driver_priv = NULL;
	return ret;
}

static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
	g2d_close(dev, file);
	kfree(file->driver_priv);
	file->driver_priv = NULL;
}

static const struct drm_ioctl_desc exynos_ioctls[] = {
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
			DRM_AUTH),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_RESOURCES,
			exynos_drm_ipp_get_res_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_CAPS, exynos_drm_ipp_get_caps_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_LIMITS,
			exynos_drm_ipp_get_limits_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_COMMIT, exynos_drm_ipp_commit_ioctl,
			DRM_RENDER_ALLOW),
};

static const struct file_operations exynos_drm_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = exynos_drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.release = drm_release,
};

static const struct drm_driver exynos_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM
			   | DRIVER_ATOMIC | DRIVER_RENDER,
	.open = exynos_drm_open,
	.lastclose = drm_fb_helper_lastclose,
	.postclose = exynos_drm_postclose,
	.dumb_create = exynos_drm_gem_dumb_create,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = exynos_drm_gem_prime_import,
	.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
	.gem_prime_mmap = exynos_drm_gem_prime_mmap,
	.ioctls = exynos_ioctls,
	.num_ioctls = ARRAY_SIZE(exynos_ioctls),
	.fops = &exynos_drm_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

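/*
 * System sleep support: suspend and resume the whole DRM mode configuration
 * through the DRM helpers, wired up below as the .prepare/.complete callbacks.
 */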
static int exynos_drm_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm_dev);
}

static void exynos_drm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	drm_mode_config_helper_resume(drm_dev);
}

static const struct dev_pm_ops exynos_drm_pm_ops = {
	.prepare = exynos_drm_suspend,
	.complete = exynos_drm_resume,
};

/* forward declaration */
static struct platform_driver exynos_drm_platform_driver;

struct exynos_drm_driver_info {
	struct platform_driver *driver;
	unsigned int flags;
};

#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */
#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */
#define DRM_FIMC_DEVICE BIT(2) /* devices shared with V4L2 subsystem */

#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)

/*
 * Connector drivers should not be placed before their associated CRTC
 * drivers, because a connector requires the pipe number of its CRTC during
 * initialization.
 */
static struct exynos_drm_driver_info exynos_drm_drivers[] = {
	{
		DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(vidi_driver, CONFIG_DRM_EXYNOS_VIDI),
		DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
	}, {
		DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
		DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE,
	}, {
		DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(scaler_driver, CONFIG_DRM_EXYNOS_SCALER),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
		DRM_COMPONENT_DRIVER
	}, {
		&exynos_drm_platform_driver,
		DRM_VIRTUAL_DEVICE
	}
};

static int compare_dev(struct device *dev, void *data)
{
	return dev == (struct device *)data;
}

static struct component_match *exynos_drm_match_add(struct device *dev)
{
	struct component_match *match = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *p = NULL, *d;

		if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
			continue;

		while ((d = platform_find_device_by_driver(p, &info->driver->driver))) {
			put_device(p);

			if (!(info->flags & DRM_FIMC_DEVICE) ||
			    exynos_drm_check_fimc_device(d) == 0)
				component_match_add(dev, &match,
						    compare_dev, d);
			p = d;
		}
		put_device(p);
	}

	return match ?: ERR_PTR(-ENODEV);
}

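/*
 * Master bind callback: allocate the DRM device, initialize the mode
 * configuration and vblank support, bind all components, set up fbdev and
 * register the device. The unwind path mirrors these steps.
 */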
static int exynos_drm_bind(struct device *dev)
{
	struct exynos_drm_private *private;
	struct drm_encoder *encoder;
	struct drm_device *drm;
	unsigned int clone_mask;
	int ret;

	drm = drm_dev_alloc(&exynos_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
	if (!private) {
		ret = -ENOMEM;
		goto err_free_drm;
	}

	init_waitqueue_head(&private->wait);
	spin_lock_init(&private->lock);

	dev_set_drvdata(dev, drm);
	drm->dev_private = (void *)private;

	drm_mode_config_init(drm);

	exynos_drm_mode_config_init(drm);

	/* setup possible_clones. */
	clone_mask = 0;
	list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
		clone_mask |= drm_encoder_mask(encoder);

	list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
		encoder->possible_clones = clone_mask;

	/* Try to bind all sub drivers. */
	ret = component_bind_all(drm->dev, drm);
	if (ret)
		goto err_mode_config_cleanup;

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret)
		goto err_unbind_all;

	drm_mode_config_reset(drm);

	/*
	 * Enable DRM IRQ mode.
	 * - with irq_enabled = true, we can use the vblank feature.
	 *
	 * Note that we do not use the DRM core's IRQ handler but each
	 * driver's own handler instead, because the DRM framework supports
	 * only one IRQ handler.
	 */
	drm->irq_enabled = true;

	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(drm);

	ret = exynos_drm_fbdev_init(drm);
	if (ret)
		goto err_cleanup_poll;

	/* register the DRM device */
	ret = drm_dev_register(drm, 0);
	if (ret < 0)
		goto err_cleanup_fbdev;

	return 0;

err_cleanup_fbdev:
	exynos_drm_fbdev_fini(drm);
err_cleanup_poll:
	drm_kms_helper_poll_fini(drm);
err_unbind_all:
	component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
	drm_mode_config_cleanup(drm);
	exynos_drm_cleanup_dma(drm);
	kfree(private);
err_free_drm:
	drm_dev_put(drm);

	return ret;
}

static void exynos_drm_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_dev_unregister(drm);

	exynos_drm_fbdev_fini(drm);
	drm_kms_helper_poll_fini(drm);

	component_unbind_all(drm->dev, drm);
	drm_mode_config_cleanup(drm);
	exynos_drm_cleanup_dma(drm);

	kfree(drm->dev_private);
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);

	drm_dev_put(drm);
}

static const struct component_master_ops exynos_drm_ops = {
	.bind = exynos_drm_bind,
	.unbind = exynos_drm_unbind,
};

static int exynos_drm_platform_probe(struct platform_device *pdev)
{
	struct component_match *match;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	match = exynos_drm_match_add(&pdev->dev);
	if (IS_ERR(match))
		return PTR_ERR(match);

	return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
					       match);
}

static int exynos_drm_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &exynos_drm_ops);
	return 0;
}

static struct platform_driver exynos_drm_platform_driver = {
	.probe = exynos_drm_platform_probe,
	.remove = exynos_drm_platform_remove,
	.driver = {
		.name = "exynos-drm",
		.pm = &exynos_drm_pm_ops,
	},
};

static void exynos_drm_unregister_devices(void)
{
	int i;

	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *dev;

		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
			continue;

		while ((dev = platform_find_device_by_driver(NULL,
						&info->driver->driver))) {
			put_device(dev);
			platform_device_unregister(to_platform_device(dev));
		}
	}
}

static int exynos_drm_register_devices(void)
{
	struct platform_device *pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
			continue;

		pdev = platform_device_register_simple(
					info->driver->driver.name, -1, NULL, 0);
		if (IS_ERR(pdev))
			goto fail;
	}

	return 0;
fail:
	exynos_drm_unregister_devices();
	return PTR_ERR(pdev);
}

static void exynos_drm_unregister_drivers(void)
{
	int i;

	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver)
			continue;

		platform_driver_unregister(info->driver);
	}
}

static int exynos_drm_register_drivers(void)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver)
			continue;

		ret = platform_driver_register(info->driver);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	exynos_drm_unregister_drivers();
	return ret;
}

static int exynos_drm_init(void)
{
	int ret;

	ret = exynos_drm_register_devices();
	if (ret)
		return ret;

	ret = exynos_drm_register_drivers();
	if (ret)
		goto err_unregister_pdevs;

	return 0;

err_unregister_pdevs:
	exynos_drm_unregister_devices();

	return ret;
}

static void exynos_drm_exit(void)
{
	exynos_drm_unregister_drivers();
	exynos_drm_unregister_devices();
}

module_init(exynos_drm_init);
module_exit(exynos_drm_exit);

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM Driver");
MODULE_LICENSE("GPL");