  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
  4 * Author: Rob Clark <rob@ti.com>
  5 */
  6
  7#include <linux/dma-mapping.h>
  8#include <linux/platform_device.h>
  9#include <linux/sort.h>
 10#include <linux/sys_soc.h>
 11
 12#include <drm/drm_atomic.h>
 13#include <drm/drm_atomic_helper.h>
 14#include <drm/drm_bridge.h>
 15#include <drm/drm_bridge_connector.h>
 16#include <drm/drm_drv.h>
 17#include <drm/drm_fb_helper.h>
 18#include <drm/drm_file.h>
 19#include <drm/drm_ioctl.h>
 20#include <drm/drm_panel.h>
 21#include <drm/drm_prime.h>
 22#include <drm/drm_probe_helper.h>
 23#include <drm/drm_vblank.h>
 24
 25#include "omap_dmm_tiler.h"
 26#include "omap_drv.h"
 27
 28#define DRIVER_NAME		MODULE_NAME
 29#define DRIVER_DESC		"OMAP DRM"
 30#define DRIVER_DATE		"20110917"
 31#define DRIVER_MAJOR		1
 32#define DRIVER_MINOR		0
 33#define DRIVER_PATCHLEVEL	0
 34
 35/*
 36 * mode config funcs
 37 */
 38
 39/* Notes about mapping DSS and DRM entities:
 40 *    CRTC:        overlay
 41 *    encoder:     manager, with some extension to allow one primary CRTC
 42 *                 and zero or more video CRTCs to be mapped to one encoder?
 43 *    connector:   dssdev; a manager can be attached to/detached from
 44 *                 different devices
 45 */
 46
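   /*
    * Wait until every CRTC that is active in the new state has finished
    * its pending update; warn about any pipe that times out.
    */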
 47static void omap_atomic_wait_for_completion(struct drm_device *dev,
 48					    struct drm_atomic_state *old_state)
 49{
 50	struct drm_crtc_state *new_crtc_state;
 51	struct drm_crtc *crtc;
 52	unsigned int i;
 53	int ret;
 54
 55	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
 56		if (!new_crtc_state->active)
 57			continue;
 58
 59		ret = omap_crtc_wait_pending(crtc);
 60
 61		if (!ret)
 62			dev_warn(dev->dev,
 63				 "atomic complete timeout (pipe %u)!\n", i);
 64	}
 65}
 66
 67static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
 68{
 69	struct drm_device *dev = old_state->dev;
 70	struct omap_drm_private *priv = dev->dev_private;
 71	bool fence_cookie = dma_fence_begin_signalling();
 72
 73	dispc_runtime_get(priv->dispc);
 74
 75	/* Apply the atomic update. */
 76	drm_atomic_helper_commit_modeset_disables(dev, old_state);
 77
 78	if (priv->omaprev != 0x3430) {
 79		/* With the current dss dispc implementation we have to enable
 80		 * the new modeset before we can commit planes. The dispc ovl
 81	 * configuration relies on the video mode configuration having
 82	 * been written into the HW when the ovl configuration is
 83		 * calculated.
 84		 *
 85		 * This approach is not ideal because after a mode change the
 86		 * plane update is executed only after the first vblank
 87		 * interrupt. The dispc implementation should be fixed so that
 88	 * it is able to use uncommitted drm state information.
 89		 */
 90		drm_atomic_helper_commit_modeset_enables(dev, old_state);
 91		omap_atomic_wait_for_completion(dev, old_state);
 92
 93		drm_atomic_helper_commit_planes(dev, old_state, 0);
 94	} else {
 95		/*
 96		 * OMAP3 DSS seems to have issues with the work-around above,
 97	 * resulting in endless sync lost errors if a crtc is enabled without
 98		 * a plane. For now, skip the WA for OMAP3.
 99		 */
100		drm_atomic_helper_commit_planes(dev, old_state, 0);
101
102		drm_atomic_helper_commit_modeset_enables(dev, old_state);
103	}
104
105	drm_atomic_helper_commit_hw_done(old_state);
106
107	dma_fence_end_signalling(fence_cookie);
108
109	/*
110	 * Wait for completion of the page flips to ensure that old buffers
111	 * can't be touched by the hardware anymore before cleaning up planes.
112	 */
113	omap_atomic_wait_for_completion(dev, old_state);
114
115	drm_atomic_helper_cleanup_planes(dev, old_state);
116
117	dispc_runtime_put(priv->dispc);
118}
119
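   /*
    * sort() comparator: order plane states by normalized zpos, using the
    * plane object ID as a tie-breaker so the order is stable.
    */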
120static int drm_atomic_state_normalized_zpos_cmp(const void *a, const void *b)
121{
122	const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
123	const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
124
125	if (sa->normalized_zpos != sb->normalized_zpos)
126		return sa->normalized_zpos - sb->normalized_zpos;
127	else
128		return sa->plane->base.id - sb->plane->base.id;
129}
130
131/*
132 * This replaces drm_atomic_normalize_zpos() to handle the dual-overlay case.
133 *
134 * Since both halves need to appear side by side, the zpos is
135 * recalculated when dealing with dual-overlay cases so that the zpos
136 * of the other planes remains consistent.
137 */
138static int omap_atomic_update_normalize_zpos(struct drm_device *dev,
139					     struct drm_atomic_state *state)
140{
141	struct drm_crtc *crtc;
142	struct drm_crtc_state *old_state, *new_state;
143	struct drm_plane *plane;
144	int c, i, n, inc;
145	int total_planes = dev->mode_config.num_total_plane;
146	struct drm_plane_state **states;
147	int ret = 0;
148
149	states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
150	if (!states)
151		return -ENOMEM;
152
153	for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, c) {
154		if (old_state->plane_mask == new_state->plane_mask &&
155		    !new_state->zpos_changed)
156			continue;
157
158		/* Reset plane increment and index value for every crtc */
159		n = 0;
160
161		/*
162		 * The normalization process might create new states for planes
163		 * whose normalized_zpos has to be recalculated.
164		 */
165		drm_for_each_plane_mask(plane, dev, new_state->plane_mask) {
166			struct drm_plane_state *plane_state =
167				drm_atomic_get_plane_state(new_state->state,
168							   plane);
169			if (IS_ERR(plane_state)) {
170				ret = PTR_ERR(plane_state);
171				goto done;
172			}
173			states[n++] = plane_state;
174		}
175
176		sort(states, n, sizeof(*states),
177		     drm_atomic_state_normalized_zpos_cmp, NULL);
178
179		for (i = 0, inc = 0; i < n; i++) {
180			plane = states[i]->plane;
181
182			states[i]->normalized_zpos = i + inc;
183			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] updated normalized zpos value %d\n",
184					 plane->base.id, plane->name,
185					 states[i]->normalized_zpos);
186
187			if (is_omap_plane_dual_overlay(states[i]))
188				inc++;
189		}
190		new_state->zpos_changed = true;
191	}
192
193done:
194	kfree(states);
195	return ret;
196}
197
198static int omap_atomic_check(struct drm_device *dev,
199			     struct drm_atomic_state *state)
200{
201	int ret;
202
203	ret = drm_atomic_helper_check(dev, state);
204	if (ret)
205		return ret;
206
207	if (dev->mode_config.normalize_zpos) {
208		ret = omap_atomic_update_normalize_zpos(dev, state);
209		if (ret)
210			return ret;
211	}
212
213	return 0;
214}
215
216static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
217	.atomic_commit_tail = omap_atomic_commit_tail,
218};
219
220static const struct drm_mode_config_funcs omap_mode_config_funcs = {
221	.fb_create = omap_framebuffer_create,
222	.output_poll_changed = drm_fb_helper_output_poll_changed,
223	.atomic_check = omap_atomic_check,
224	.atomic_commit = drm_atomic_helper_commit,
225};
226
227/* Global/shared object state funcs */
228
229/*
230 * This is a helper that returns the private state currently in operation.
231 * Note that this would return the "old_state" if called in the atomic check
232 * path, and the "new_state" after the atomic swap has been done.
233 */
234struct omap_global_state *
235omap_get_existing_global_state(struct omap_drm_private *priv)
236{
237	return to_omap_global_state(priv->glob_obj.state);
238}
239
240/*
241 * This acquires the modeset lock set aside for global state and
242 * creates a new duplicated private object state.
243 */
244struct omap_global_state *__must_check
245omap_get_global_state(struct drm_atomic_state *s)
246{
247	struct omap_drm_private *priv = s->dev->dev_private;
248	struct drm_private_state *priv_state;
249
250	priv_state = drm_atomic_get_private_obj_state(s, &priv->glob_obj);
251	if (IS_ERR(priv_state))
252		return ERR_CAST(priv_state);
253
254	return to_omap_global_state(priv_state);
255}
256
257static struct drm_private_state *
258omap_global_duplicate_state(struct drm_private_obj *obj)
259{
260	struct omap_global_state *state;
261
262	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
263	if (!state)
264		return NULL;
265
266	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
267
268	return &state->base;
269}
270
271static void omap_global_destroy_state(struct drm_private_obj *obj,
272				      struct drm_private_state *state)
273{
274	struct omap_global_state *omap_state = to_omap_global_state(state);
275
276	kfree(omap_state);
277}
278
279static const struct drm_private_state_funcs omap_global_state_funcs = {
280	.atomic_duplicate_state = omap_global_duplicate_state,
281	.atomic_destroy_state = omap_global_destroy_state,
282};
283
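   /*
    * Allocate the initial global state and register the driver-wide
    * private object with the atomic framework.
    */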
284static int omap_global_obj_init(struct drm_device *dev)
285{
286	struct omap_drm_private *priv = dev->dev_private;
287	struct omap_global_state *state;
288
289	state = kzalloc(sizeof(*state), GFP_KERNEL);
290	if (!state)
291		return -ENOMEM;
292
293	drm_atomic_private_obj_init(dev, &priv->glob_obj, &state->base,
294				    &omap_global_state_funcs);
295	return 0;
296}
297
298static void omap_global_obj_fini(struct omap_drm_private *priv)
299{
300	drm_atomic_private_obj_fini(&priv->glob_obj);
301}
302
303static void omap_disconnect_pipelines(struct drm_device *ddev)
304{
305	struct omap_drm_private *priv = ddev->dev_private;
306	unsigned int i;
307
308	for (i = 0; i < priv->num_pipes; i++) {
309		struct omap_drm_pipeline *pipe = &priv->pipes[i];
310
311		omapdss_device_disconnect(NULL, pipe->output);
312
313		omapdss_device_put(pipe->output);
314		pipe->output = NULL;
315	}
316
317	memset(&priv->channels, 0, sizeof(priv->channels));
318
319	priv->num_pipes = 0;
320}
321
322static int omap_connect_pipelines(struct drm_device *ddev)
323{
324	struct omap_drm_private *priv = ddev->dev_private;
325	struct omap_dss_device *output = NULL;
326	int r;
327
328	for_each_dss_output(output) {
329		r = omapdss_device_connect(priv->dss, NULL, output);
330		if (r == -EPROBE_DEFER) {
331			omapdss_device_put(output);
332			return r;
333		} else if (r) {
334			dev_warn(output->dev, "could not connect output %s\n",
335				 output->name);
336		} else {
337			struct omap_drm_pipeline *pipe;
338
339			pipe = &priv->pipes[priv->num_pipes++];
340			pipe->output = omapdss_device_get(output);
341
342			if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) {
343				/* To balance the 'for_each_dss_output' loop */
344				omapdss_device_put(output);
345				break;
346			}
347		}
348	}
349
350	return 0;
351}
352
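   /* sort() comparator: order pipelines by their DT "display" alias ID. */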
353static int omap_compare_pipelines(const void *a, const void *b)
354{
355	const struct omap_drm_pipeline *pipe1 = a;
356	const struct omap_drm_pipeline *pipe2 = b;
357
358	if (pipe1->alias_id > pipe2->alias_id)
359		return 1;
360	else if (pipe1->alias_id < pipe2->alias_id)
361		return -1;
362	return 0;
363}
364
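   /*
    * Create the driver-specific "zorder" range property, spanning
    * 0 .. (number of overlays - 1).
    */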
365static int omap_modeset_init_properties(struct drm_device *dev)
366{
367	struct omap_drm_private *priv = dev->dev_private;
368	unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
369
370	priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
371						      num_planes - 1);
372	if (!priv->zorder_prop)
373		return -ENOMEM;
374
375	return 0;
376}
377
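   /*
    * Return the DT "display" alias ID of the last bridge in the output's
    * chain, or a negative error code if it cannot be determined.
    */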
378static int omap_display_id(struct omap_dss_device *output)
379{
380	struct device_node *node = NULL;
381
382	if (output->bridge) {
383		struct drm_bridge *bridge = output->bridge;
384
385		while (drm_bridge_get_next_bridge(bridge))
386			bridge = drm_bridge_get_next_bridge(bridge);
387
388		node = bridge->of_node;
389	}
390
391	return node ? of_alias_get_id(node, "display") : -ENODEV;
392}
393
394static int omap_modeset_init(struct drm_device *dev)
395{
396	struct omap_drm_private *priv = dev->dev_private;
397	int num_ovls = dispc_get_num_ovls(priv->dispc);
398	int num_mgrs = dispc_get_num_mgrs(priv->dispc);
399	unsigned int i;
400	int ret;
401	u32 plane_crtc_mask;
402
403	if (!omapdss_stack_is_ready())
404		return -EPROBE_DEFER;
405
406	ret = omap_modeset_init_properties(dev);
407	if (ret < 0)
408		return ret;
409
410	/*
411	 * This function creates exactly one connector, encoder, crtc,
412	 * and primary plane for each connected dss-device. Each
413	 * connector->encoder->crtc chain is expected to be separate
414	 * and each crtc is connected to a single dss-channel. If the
415	 * configuration does not match the expectations or exceeds
416	 * the available resources, the configuration is rejected.
417	 */
418	ret = omap_connect_pipelines(dev);
419	if (ret < 0)
420		return ret;
421
422	if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) {
423		dev_err(dev->dev, "%s(): Too many connected displays\n",
424			__func__);
425		return -EINVAL;
426	}
427
428	/* Create all planes first. They can all be attached to any CRTC. */
429	plane_crtc_mask = (1 << priv->num_pipes) - 1;
430
431	for (i = 0; i < num_ovls; i++) {
432		enum drm_plane_type type = i < priv->num_pipes
433					 ? DRM_PLANE_TYPE_PRIMARY
434					 : DRM_PLANE_TYPE_OVERLAY;
435		struct drm_plane *plane;
436
437		if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes)))
438			return -EINVAL;
439
440		plane = omap_plane_init(dev, i, type, plane_crtc_mask);
441		if (IS_ERR(plane))
442			return PTR_ERR(plane);
443
444		priv->planes[priv->num_planes++] = plane;
445	}
446
447	/*
448	 * Create the encoders, attach the bridges and get the pipeline alias
449	 * IDs.
450	 */
451	for (i = 0; i < priv->num_pipes; i++) {
452		struct omap_drm_pipeline *pipe = &priv->pipes[i];
453		int id;
454
455		pipe->encoder = omap_encoder_init(dev, pipe->output);
456		if (!pipe->encoder)
457			return -ENOMEM;
458
459		if (pipe->output->bridge) {
460			ret = drm_bridge_attach(pipe->encoder,
461						pipe->output->bridge, NULL,
462						DRM_BRIDGE_ATTACH_NO_CONNECTOR);
463			if (ret < 0)
464				return ret;
465		}
466
467		id = omap_display_id(pipe->output);
468		pipe->alias_id = id >= 0 ? id : i;
469	}
470
471	/* Sort the pipelines by DT aliases. */
472	sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
473	     omap_compare_pipelines, NULL);
474
475	/*
476	 * Populate the pipeline lookup table by DISPC channel. Only one display
477	 * is allowed per channel.
478	 */
479	for (i = 0; i < priv->num_pipes; ++i) {
480		struct omap_drm_pipeline *pipe = &priv->pipes[i];
481		enum omap_channel channel = pipe->output->dispc_channel;
482
483		if (WARN_ON(priv->channels[channel] != NULL))
484			return -EINVAL;
485
486		priv->channels[channel] = pipe;
487	}
488
489	/* Create the connectors and CRTCs. */
490	for (i = 0; i < priv->num_pipes; i++) {
491		struct omap_drm_pipeline *pipe = &priv->pipes[i];
492		struct drm_encoder *encoder = pipe->encoder;
493		struct drm_crtc *crtc;
494
495		pipe->connector = drm_bridge_connector_init(dev, encoder);
496		if (IS_ERR(pipe->connector)) {
497			dev_err(priv->dev,
498				"unable to create bridge connector for %s\n",
499				pipe->output->name);
500			return PTR_ERR(pipe->connector);
501		}
502
503		drm_connector_attach_encoder(pipe->connector, encoder);
504
505		crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
506		if (IS_ERR(crtc))
507			return PTR_ERR(crtc);
508
509		encoder->possible_crtcs = 1 << i;
510		pipe->crtc = crtc;
511	}
512
513	DBG("registered %u planes, %u crtcs/encoders/connectors\n",
514	    priv->num_planes, priv->num_pipes);
515
516	dev->mode_config.min_width = 8;
517	dev->mode_config.min_height = 2;
518
519	/*
520	 * Note: these values are used for multiple independent things:
521	 * connector mode filtering, buffer sizes, crtc sizes...
522	 * Use big enough values here to cover all use cases, and do more
523	 * specific checking in the respective code paths.
524	 */
525	dev->mode_config.max_width = 8192;
526	dev->mode_config.max_height = 8192;
527
528	/* We want the zpos to be normalized */
529	dev->mode_config.normalize_zpos = true;
530
531	dev->mode_config.funcs = &omap_mode_config_funcs;
532	dev->mode_config.helper_private = &omap_mode_config_helper_funcs;
533
534	drm_mode_config_reset(dev);
535
536	omap_drm_irq_install(dev);
537
538	return 0;
539}
540
541static void omap_modeset_fini(struct drm_device *ddev)
542{
543	omap_drm_irq_uninstall(ddev);
544
545	drm_mode_config_cleanup(ddev);
546}
547
548/*
549 * Enable the HPD in external components if supported
550 */
551static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
552{
553	struct omap_drm_private *priv = ddev->dev_private;
554	unsigned int i;
555
556	for (i = 0; i < priv->num_pipes; i++) {
557		struct drm_connector *connector = priv->pipes[i].connector;
558
559		if (!connector)
560			continue;
561
562		if (priv->pipes[i].output->bridge)
563			drm_bridge_connector_enable_hpd(connector);
564	}
565}
566
567/*
568 * Disable the HPD in external components if supported
569 */
570static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
571{
572	struct omap_drm_private *priv = ddev->dev_private;
573	unsigned int i;
574
575	for (i = 0; i < priv->num_pipes; i++) {
576		struct drm_connector *connector = priv->pipes[i].connector;
577
578		if (!connector)
579			continue;
580
581		if (priv->pipes[i].output->bridge)
582			drm_bridge_connector_disable_hpd(connector);
583	}
584}
585
586/*
587 * drm ioctl funcs
588 */
589
590
591static int ioctl_get_param(struct drm_device *dev, void *data,
592		struct drm_file *file_priv)
593{
594	struct omap_drm_private *priv = dev->dev_private;
595	struct drm_omap_param *args = data;
596
597	DBG("%p: param=%llu", dev, args->param);
598
599	switch (args->param) {
600	case OMAP_PARAM_CHIPSET_ID:
601		args->value = priv->omaprev;
602		break;
603	default:
604		DBG("unknown parameter %lld", args->param);
605		return -EINVAL;
606	}
607
608	return 0;
609}
610
611#define OMAP_BO_USER_MASK	0x00ffffff	/* flags settable by userspace */
612
613static int ioctl_gem_new(struct drm_device *dev, void *data,
614		struct drm_file *file_priv)
615{
616	struct drm_omap_gem_new *args = data;
617	u32 flags = args->flags & OMAP_BO_USER_MASK;
618
619	VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
620	     args->size.bytes, flags);
621
622	return omap_gem_new_handle(dev, file_priv, args->size, flags,
623				   &args->handle);
624}
625
626static int ioctl_gem_info(struct drm_device *dev, void *data,
627		struct drm_file *file_priv)
628{
629	struct drm_omap_gem_info *args = data;
630	struct drm_gem_object *obj;
631	int ret = 0;
632
633	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
634
635	obj = drm_gem_object_lookup(file_priv, args->handle);
636	if (!obj)
637		return -ENOENT;
638
639	args->size = omap_gem_mmap_size(obj);
640	args->offset = omap_gem_mmap_offset(obj);
641
642	drm_gem_object_put(obj);
643
644	return ret;
645}
646
647static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
648	DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param,
649			  DRM_RENDER_ALLOW),
650	DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, drm_invalid_op,
651			  DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
652	DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new,
653			  DRM_RENDER_ALLOW),
654	/* Deprecated, to be removed. */
655	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, drm_noop,
656			  DRM_RENDER_ALLOW),
657	/* Deprecated, to be removed. */
658	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, drm_noop,
659			  DRM_RENDER_ALLOW),
660	DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info,
661			  DRM_RENDER_ALLOW),
662};
663
664/*
665 * drm driver funcs
666 */
667
668static int dev_open(struct drm_device *dev, struct drm_file *file)
669{
670	file->driver_priv = NULL;
671
672	DBG("open: dev=%p, file=%p", dev, file);
673
674	return 0;
675}
676
677static const struct file_operations omapdriver_fops = {
678	.owner = THIS_MODULE,
679	.open = drm_open,
680	.unlocked_ioctl = drm_ioctl,
681	.compat_ioctl = drm_compat_ioctl,
682	.release = drm_release,
683	.mmap = omap_gem_mmap,
684	.poll = drm_poll,
685	.read = drm_read,
686	.llseek = noop_llseek,
687};
688
689static const struct drm_driver omap_drm_driver = {
690	.driver_features = DRIVER_MODESET | DRIVER_GEM  |
691		DRIVER_ATOMIC | DRIVER_RENDER,
692	.open = dev_open,
693	.lastclose = drm_fb_helper_lastclose,
694#ifdef CONFIG_DEBUG_FS
695	.debugfs_init = omap_debugfs_init,
696#endif
697	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
698	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
699	.gem_prime_import = omap_gem_prime_import,
700	.dumb_create = omap_gem_dumb_create,
701	.dumb_map_offset = omap_gem_dumb_map_offset,
702	.ioctls = ioctls,
703	.num_ioctls = DRM_OMAP_NUM_IOCTLS,
704	.fops = &omapdriver_fops,
705	.name = DRIVER_NAME,
706	.desc = DRIVER_DESC,
707	.date = DRIVER_DATE,
708	.major = DRIVER_MAJOR,
709	.minor = DRIVER_MINOR,
710	.patchlevel = DRIVER_PATCHLEVEL,
711};
712
713static const struct soc_device_attribute omapdrm_soc_devices[] = {
714	{ .family = "OMAP3", .data = (void *)0x3430 },
715	{ .family = "OMAP4", .data = (void *)0x4430 },
716	{ .family = "OMAP5", .data = (void *)0x5430 },
717	{ .family = "DRA7",  .data = (void *)0x0752 },
718	{ /* sentinel */ }
719};
720
721static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
722{
723	const struct soc_device_attribute *soc;
724	struct dss_pdata *pdata = dev->platform_data;
725	struct drm_device *ddev;
726	int ret;
727
728	DBG("%s", dev_name(dev));
729
730	if (drm_firmware_drivers_only())
731		return -ENODEV;
732
733	/* Allocate and initialize the DRM device. */
734	ddev = drm_dev_alloc(&omap_drm_driver, dev);
735	if (IS_ERR(ddev))
736		return PTR_ERR(ddev);
737
738	priv->ddev = ddev;
739	ddev->dev_private = priv;
740
741	priv->dev = dev;
742	priv->dss = pdata->dss;
743	priv->dispc = dispc_get_dispc(priv->dss);
744
745	priv->dss->mgr_ops_priv = priv;
746
747	soc = soc_device_match(omapdrm_soc_devices);
748	priv->omaprev = soc ? (uintptr_t)soc->data : 0;
749	priv->wq = alloc_ordered_workqueue("omapdrm", 0);
750
751	mutex_init(&priv->list_lock);
752	INIT_LIST_HEAD(&priv->obj_list);
753
754	/* Get memory bandwidth limits */
755	priv->max_bandwidth = dispc_get_memory_bandwidth_limit(priv->dispc);
756
757	omap_gem_init(ddev);
758
759	drm_mode_config_init(ddev);
760
761	ret = omap_global_obj_init(ddev);
762	if (ret)
763		goto err_gem_deinit;
764
765	ret = omap_hwoverlays_init(priv);
766	if (ret)
767		goto err_free_priv_obj;
768
769	ret = omap_modeset_init(ddev);
770	if (ret) {
771		dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
772		goto err_free_overlays;
773	}
774
775	/* Initialize vblank handling, start with all CRTCs disabled. */
776	ret = drm_vblank_init(ddev, priv->num_pipes);
777	if (ret) {
778		dev_err(priv->dev, "could not init vblank\n");
779		goto err_cleanup_modeset;
780	}
781
782	omap_fbdev_init(ddev);
783
784	drm_kms_helper_poll_init(ddev);
785	omap_modeset_enable_external_hpd(ddev);
786
787	/*
788	 * Register the DRM device with the core and the connectors with
789	 * sysfs.
790	 */
791	ret = drm_dev_register(ddev, 0);
792	if (ret)
793		goto err_cleanup_helpers;
794
795	return 0;
796
797err_cleanup_helpers:
798	omap_modeset_disable_external_hpd(ddev);
799	drm_kms_helper_poll_fini(ddev);
800
801	omap_fbdev_fini(ddev);
802err_cleanup_modeset:
803	omap_modeset_fini(ddev);
804err_free_overlays:
805	omap_hwoverlays_destroy(priv);
806err_free_priv_obj:
807	omap_global_obj_fini(priv);
808err_gem_deinit:
809	drm_mode_config_cleanup(ddev);
810	omap_gem_deinit(ddev);
811	destroy_workqueue(priv->wq);
812	omap_disconnect_pipelines(ddev);
813	drm_dev_put(ddev);
814	return ret;
815}
816
817static void omapdrm_cleanup(struct omap_drm_private *priv)
818{
819	struct drm_device *ddev = priv->ddev;
820
821	DBG("");
822
823	drm_dev_unregister(ddev);
824
825	omap_modeset_disable_external_hpd(ddev);
826	drm_kms_helper_poll_fini(ddev);
827
828	omap_fbdev_fini(ddev);
829
830	drm_atomic_helper_shutdown(ddev);
831
832	omap_modeset_fini(ddev);
833	omap_hwoverlays_destroy(priv);
834	omap_global_obj_fini(priv);
835	drm_mode_config_cleanup(ddev);
836	omap_gem_deinit(ddev);
837
838	destroy_workqueue(priv->wq);
839
840	omap_disconnect_pipelines(ddev);
841
842	drm_dev_put(ddev);
843}
844
845static int pdev_probe(struct platform_device *pdev)
846{
847	struct omap_drm_private *priv;
848	int ret;
849
850	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
851	if (ret) {
852		dev_err(&pdev->dev, "Failed to set the DMA mask\n");
853		return ret;
854	}
855
856	/* Allocate and initialize the driver private structure. */
857	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
858	if (!priv)
859		return -ENOMEM;
860
861	platform_set_drvdata(pdev, priv);
862
863	ret = omapdrm_init(priv, &pdev->dev);
864	if (ret < 0)
865		kfree(priv);
866
867	return ret;
868}
869
870static int pdev_remove(struct platform_device *pdev)
871{
872	struct omap_drm_private *priv = platform_get_drvdata(pdev);
873
874	omapdrm_cleanup(priv);
875	kfree(priv);
876
877	return 0;
878}
879
880#ifdef CONFIG_PM_SLEEP
881static int omap_drm_suspend(struct device *dev)
882{
883	struct omap_drm_private *priv = dev_get_drvdata(dev);
884	struct drm_device *drm_dev = priv->ddev;
885
886	return drm_mode_config_helper_suspend(drm_dev);
887}
888
889static int omap_drm_resume(struct device *dev)
890{
891	struct omap_drm_private *priv = dev_get_drvdata(dev);
892	struct drm_device *drm_dev = priv->ddev;
893
894	drm_mode_config_helper_resume(drm_dev);
895
896	return omap_gem_resume(drm_dev);
897}
898#endif
899
900static SIMPLE_DEV_PM_OPS(omapdrm_pm_ops, omap_drm_suspend, omap_drm_resume);
901
902static struct platform_driver pdev = {
903	.driver = {
904		.name = "omapdrm",
905		.pm = &omapdrm_pm_ops,
906	},
907	.probe = pdev_probe,
908	.remove = pdev_remove,
909};
910
911static struct platform_driver * const drivers[] = {
912	&omap_dmm_driver,
913	&pdev,
914};
915
916static int __init omap_drm_init(void)
917{
918	int r;
919
920	DBG("init");
921
922	r = omap_dss_init();
923	if (r)
924		return r;
925
926	r = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
927	if (r) {
928		omap_dss_exit();
929		return r;
930	}
931
932	return 0;
933}
934
935static void __exit omap_drm_fini(void)
936{
937	DBG("fini");
938
939	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
940
941	omap_dss_exit();
942}
943
944module_init(omap_drm_init);
945module_exit(omap_drm_fini);
946
947MODULE_AUTHOR("Rob Clark <rob@ti.com>");
948MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
949MODULE_DESCRIPTION("OMAP DRM Display Driver");
950MODULE_ALIAS("platform:" DRIVER_NAME);
951MODULE_LICENSE("GPL v2");