/*
 * Copyright (C) STMicroelectronics SA 2014
 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
 * License terms:  GNU General Public License (GPL), version 2
 */

#include <drm/drmP.h>

#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>

#include "sti_crtc.h"
#include "sti_drv.h"
#include "sti_plane.h"

#define DRIVER_NAME	"sti"
#define DRIVER_DESC	"STMicroelectronics SoC DRM"
#define DRIVER_DATE	"20140601"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

#define STI_MAX_FB_HEIGHT	4096
#define STI_MAX_FB_WIDTH	4096

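/*
 * Backend for the "fps_show" debugfs attribute: the value is a per-plane
 * bitmask.  Bit i enables (1) or disables (0) FPS reporting for the i-th
 * plane in mode_config.plane_list order; the getter returns the current mask.
 */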
static int sti_drm_fps_get(void *data, u64 *val)
{
	struct drm_device *drm_dev = data;
	struct drm_plane *p;
	unsigned int i = 0;

	*val = 0;
	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
		struct sti_plane *plane = to_sti_plane(p);

		*val |= plane->fps_info.output << i;
		i++;
	}

	return 0;
}

static int sti_drm_fps_set(void *data, u64 val)
{
	struct drm_device *drm_dev = data;
	struct drm_plane *p;
	unsigned int i = 0;

	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
		struct sti_plane *plane = to_sti_plane(p);

		plane->fps_info.output = (val >> i) & 1;
		i++;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sti_drm_fps_fops,
			sti_drm_fps_get, sti_drm_fps_set, "%llu\n");

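/*
 * "fps_get" debugfs entry: dump, for every plane, the two rate strings
 * (fps_str and fips_str) maintained in sti_plane::fps_info, while holding
 * dev->struct_mutex so the plane list stays stable during the walk.
 */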
static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
{
	struct drm_info_node *node = s->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_plane *p;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
		struct sti_plane *plane = to_sti_plane(p);

		seq_printf(s, "%s%s\n",
			   plane->fps_info.fps_str,
			   plane->fps_info.fips_str);
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static struct drm_info_list sti_drm_dbg_list[] = {
	{"fps_get", sti_drm_fps_dbg_show, 0},
};

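/*
 * Create a read/write debugfs file under @root and track it with a
 * drm_info_node on minor->debugfs_list, so that it can later be removed
 * via drm_debugfs_remove_files() from sti_drm_dbg_cleanup().
 */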
static int sti_drm_debugfs_create(struct dentry *root,
				  struct drm_minor *minor,
				  const char *name,
				  const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct drm_info_node *node;
	struct dentry *ent;

	ent = debugfs_create_file(name, S_IRUGO | S_IWUSR, root, dev, fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *)fops;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

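/*
 * Register the driver debugfs entries on the DRM minor: the read-only
 * "fps_get" info file and the read/write "fps_show" attribute.  Both land
 * in the minor's debugfs directory, typically /sys/kernel/debug/dri/<minor>/
 * (assuming the usual debugfs mount point); e.g. writing 3 to fps_show
 * enables FPS reporting on the first two planes.
 */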
static int sti_drm_dbg_init(struct drm_minor *minor)
{
	int ret;

	ret = drm_debugfs_create_files(sti_drm_dbg_list,
				       ARRAY_SIZE(sti_drm_dbg_list),
				       minor->debugfs_root, minor);
	if (ret)
		goto err;

	ret = sti_drm_debugfs_create(minor->debugfs_root, minor, "fps_show",
				     &sti_drm_fps_fops);
	if (ret)
		goto err;

	DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
	return 0;
err:
	DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME);
	return ret;
}

void sti_drm_dbg_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(sti_drm_dbg_list,
				 ARRAY_SIZE(sti_drm_dbg_list), minor);

	drm_debugfs_remove_files((struct drm_info_list *)&sti_drm_fps_fops,
				 1, minor);
}

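/*
 * Asynchronous (nonblocking) commit support: sti_atomic_schedule() stashes
 * the swapped-in state in private->commit.state and defers the commit tail
 * to the commit worker, which runs sti_atomic_complete() from
 * sti_atomic_work().
 */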
static void sti_atomic_schedule(struct sti_private *private,
				struct drm_atomic_state *state)
{
	private->commit.state = state;
	schedule_work(&private->commit.work);
}

static void sti_atomic_complete(struct sti_private *private,
				struct drm_atomic_state *state)
{
	struct drm_device *drm = private->drm_dev;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_planes(drm, state, false);
	drm_atomic_helper_commit_modeset_enables(drm, state);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_free(state);
}

static void sti_atomic_work(struct work_struct *work)
{
	struct sti_private *private = container_of(work,
			struct sti_private, commit.work);

	sti_atomic_complete(private, private->commit.state);
}

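/*
 * Atomic commit entry point: prepare the plane framebuffers, serialize with
 * any still-running asynchronous commit (flush_work() under commit.lock),
 * swap in the new state, then either hand the commit tail to the worker
 * (async) or run it synchronously.
 */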
static int sti_atomic_commit(struct drm_device *drm,
			     struct drm_atomic_state *state, bool async)
{
	struct sti_private *private = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding asynchronous commits */
	mutex_lock(&private->commit.lock);
	flush_work(&private->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(drm, state);

	if (async)
		sti_atomic_schedule(private, state);
	else
		sti_atomic_complete(private, state);

	mutex_unlock(&private->commit.lock);
	return 0;
}

static const struct drm_mode_config_funcs sti_mode_config_funcs = {
	.fb_create = drm_fb_cma_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = sti_atomic_commit,
};

static void sti_mode_config_init(struct drm_device *dev)
{
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * Set the default maximum width and height; these values are used
	 * to check the framebuffer size limits in drm_mode_addfb().
	 */
	dev->mode_config.max_width = STI_MAX_FB_WIDTH;
	dev->mode_config.max_height = STI_MAX_FB_HEIGHT;

	dev->mode_config.funcs = &sti_mode_config_funcs;
}

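/*
 * DRM driver .load callback: allocate the sti_private structure, initialise
 * the mode configuration and connector polling, bind all sub-components
 * gathered by the component framework, then set up the CMA-backed fbdev
 * emulation at 32 bpp.
 */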
static int sti_load(struct drm_device *dev, unsigned long flags)
{
	struct sti_private *private;
	int ret;

	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private) {
		DRM_ERROR("Failed to allocate private\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)private;
	private->drm_dev = dev;

	mutex_init(&private->commit.lock);
	INIT_WORK(&private->commit.work, sti_atomic_work);

	drm_mode_config_init(dev);
	drm_kms_helper_poll_init(dev);

	sti_mode_config_init(dev);

	ret = component_bind_all(dev->dev, dev);
	if (ret) {
		drm_kms_helper_poll_fini(dev);
		drm_mode_config_cleanup(dev);
		kfree(private);
		return ret;
	}

	drm_mode_config_reset(dev);

	drm_helper_disable_unused_functions(dev);
	drm_fbdev_cma_init(dev, 32,
			   dev->mode_config.num_crtc,
			   dev->mode_config.num_connector);

	return 0;
}

static const struct file_operations sti_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = drm_gem_cma_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.release = drm_release,
};

static struct drm_driver sti_driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
	    DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
	.load = sti_load,
	.gem_free_object = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.fops = &sti_driver_fops,

	.get_vblank_counter = drm_vblank_no_hw_counter,
	.enable_vblank = sti_crtc_enable_vblank,
	.disable_vblank = sti_crtc_disable_vblank,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,

	.debugfs_init = sti_drm_dbg_init,
	.debugfs_cleanup = sti_drm_dbg_cleanup,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

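/*
 * Component/master glue: compare_of() matches a component device against the
 * device-tree node recorded in the match list; once every component has
 * probed, sti_bind() registers the DRM device and sti_unbind() tears it down.
 */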
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int sti_bind(struct device *dev)
{
	return drm_platform_init(&sti_driver, to_platform_device(dev));
}

static void sti_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}

static const struct component_master_ops sti_ops = {
	.bind = sti_bind,
	.unbind = sti_unbind,
};

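/*
 * Master platform driver probe: set a 32-bit coherent DMA mask, populate the
 * child platform devices described under the display-subsystem node, and add
 * every available child to the component match list.  The DRM device itself
 * is only created from sti_bind() once all matched components have probed.
 */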
static int sti_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *child_np;
	struct component_match *match = NULL;

	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));

	of_platform_populate(node, NULL, NULL, dev);

	child_np = of_get_next_available_child(node, NULL);

	while (child_np) {
		component_match_add(dev, &match, compare_of, child_np);
		of_node_put(child_np);
		child_np = of_get_next_available_child(node, child_np);
	}

	return component_master_add_with_match(dev, &sti_ops, match);
}

static int sti_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &sti_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static const struct of_device_id sti_dt_ids[] = {
	{ .compatible = "st,sti-display-subsystem", },
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, sti_dt_ids);

static struct platform_driver sti_platform_driver = {
	.probe = sti_platform_probe,
	.remove = sti_platform_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = sti_dt_ids,
	},
};

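/*
 * All STI sub-drivers are registered together with the master platform
 * driver above via platform_register_drivers(), so the whole display
 * pipeline is handled by a single module.
 */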
static struct platform_driver * const drivers[] = {
	&sti_tvout_driver,
	&sti_vtac_driver,
	&sti_hqvdp_driver,
	&sti_hdmi_driver,
	&sti_hda_driver,
	&sti_dvo_driver,
	&sti_vtg_driver,
	&sti_compositor_driver,
	&sti_platform_driver,
};

static int sti_drm_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(sti_drm_init);

static void sti_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(sti_drm_exit);

MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");