drivers/gpu/drm/drm_fbdev_dma.c as of Linux v6.13.7:
// SPDX-License-Identifier: MIT

#include <linux/fb.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>

/*
 * struct fb_ops
 */

static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* No need to take a ref for fbcon because it unbinds on unregister */
	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
		return -ENODEV;

	return 0;
}

static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (user)
		module_put(fb_helper->dev->driver->fops->owner);

	return 0;
}

static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;

	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
}

static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (!fb_helper->dev)
		return;

	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}

static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};

FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);

static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	void *shadow = info->screen_buffer;

	if (!fb_helper->dev)
		return;

	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);
	vfree(shadow);

	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}

static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};

/*
 * struct drm_fb_helper
 */

static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
					   struct drm_clip_rect *clip,
					   struct iosys_map *dst)
{
	struct drm_framebuffer *fb = fb_helper->fb;
	size_t offset = clip->y1 * fb->pitches[0];
	size_t len = clip->x2 - clip->x1;
	unsigned int y;
	void *src;

	switch (drm_format_info_bpp(fb->format, 0)) {
	case 1:
		offset += clip->x1 / 8;
		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
		break;
	case 2:
		offset += clip->x1 / 4;
		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
		break;
	case 4:
		offset += clip->x1 / 2;
		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
		break;
	default:
		offset += clip->x1 * fb->format->cpp[0];
		len *= fb->format->cpp[0];
		break;
	}

	src = fb_helper->info->screen_buffer + offset;
	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */

	for (y = clip->y1; y < clip->y2; y++) {
		iosys_map_memcpy_to(dst, 0, src, len);
		iosys_map_incr(dst, fb->pitches[0]);
		src += fb->pitches[0];
	}
}

static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
				     struct drm_clip_rect *clip)
{
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct iosys_map dst;

	/*
	 * For fbdev emulation, we only have to protect against fbdev modeset
	 * operations. Nothing else will involve the client buffer's BO. So it
	 * is sufficient to acquire struct drm_fb_helper.lock here.
	 */
	mutex_lock(&fb_helper->lock);

	dst = buffer->map;
	drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);

	mutex_unlock(&fb_helper->lock);

	return 0;
}
static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
					 struct drm_clip_rect *clip)
{
	struct drm_device *dev = helper->dev;
	int ret;

	/* Call damage handlers only if necessary */
	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
		return 0;

	if (helper->fb->funcs->dirty) {
		ret = drm_fbdev_dma_damage_blit(helper, clip);
		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
			return ret;

		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
			return ret;
	}

	return 0;
}

static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};

/*
 * struct drm_fb_helper
 */

static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
						 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
	struct drm_framebuffer *fb = fb_helper->fb;
	struct fb_info *info = fb_helper->info;
	struct iosys_map map = buffer->map;

	info->fbops = &drm_fbdev_dma_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (dma_obj->map_noncoherent)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
	}
	info->fix.smem_len = info->screen_size;

	return 0;
}

static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
							  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct fb_info *info = fb_helper->info;
	size_t screen_size = buffer->gem->size;
	void *screen_buffer;
	int ret;

	/*
	 * Deferred I/O requires struct page for framebuffer memory,
	 * which is not guaranteed for all DMA ranges. We thus create
	 * a shadow buffer in system memory.
	 */
	screen_buffer = vzalloc(screen_size);
	if (!screen_buffer)
		return -ENOMEM;

	info->fbops = &drm_fbdev_dma_shadowed_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_buffer = screen_buffer;
	info->fix.smem_len = screen_size;

	fb_helper->fbdefio.delay = HZ / 20;
	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

	info->fbdefio = &fb_helper->fbdefio;
	ret = fb_deferred_io_init(info);
	if (ret)
		goto err_vfree;

	return 0;

err_vfree:
	vfree(screen_buffer);
	return ret;
}

int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
				     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
					     sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	fb = buffer->fb;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
	fb_helper->buffer = buffer;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	if (fb->funcs->dirty)
		ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
	else
		ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
	if (ret)
		goto err_drm_fb_helper_release_info;

	return 0;

err_drm_fb_helper_release_info:
	drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
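
In v6.13.7 this file only exports the fbdev probe callback; registering the fbdev client itself is left to common DRM client code. As a rough, hedged sketch (not taken from this file): a GEM-DMA driver would typically pull in DRM_FBDEV_DMA_DRIVER_OPS from <drm/drm_fbdev_dma.h>, which points struct drm_driver's fbdev_probe at the function above, and then call the generic drm_client_setup() after device registration. Header locations and helper names vary between releases, so treat the following as an illustration only, with placeholder "my_*" names.

/* Illustrative driver glue only -- "my_*" names are placeholders. */
#include <drm/drm_client_setup.h>	/* header location may differ in other releases */
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>

static const struct drm_driver my_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,		/* dumb-buffer support via GEM DMA helpers */
	DRM_FBDEV_DMA_DRIVER_OPS,	/* .fbdev_probe = drm_fbdev_dma_driver_fbdev_probe */
	.name = "my-drm",
	/* ... */
};

static int my_register(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* Register the in-kernel fbdev client; NULL keeps the driver's default format. */
	drm_client_setup(drm, NULL);

	return 0;
}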
For comparison, drivers/gpu/drm/drm_fbdev_dma.c as of Linux v6.9.4, which still registered the DRM client itself:
// SPDX-License-Identifier: MIT

#include <linux/fb.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>

#include <drm/drm_fbdev_dma.h>

/*
 * struct fb_ops
 */

static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* No need to take a ref for fbcon because it unbinds on unregister */
	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
		return -ENODEV;

	return 0;
}

static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (user)
		module_put(fb_helper->dev->driver->fops->owner);

	return 0;
}

static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (!fb_helper->dev)
		return;

	drm_fb_helper_fini(fb_helper);

	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}

static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;

	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
}

static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};

/*
 * struct drm_fb_helper
 */

static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
					 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_gem_dma_object *dma_obj;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	dma_obj = to_drm_gem_dma_obj(buffer->gem);

	fb = buffer->fb;
	if (drm_WARN_ON(dev, fb->funcs->dirty)) {
		ret = -ENODEV; /* damage handling not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->buffer = buffer;
	fb_helper->fb = buffer->fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fbops = &drm_fbdev_dma_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (dma_obj->map_noncoherent)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
	info->fix.smem_len = info->screen_size;

	return 0;

err_drm_client_buffer_vunmap:
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}

static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_probe = drm_fbdev_dma_helper_fb_probe,
};

/*
 * struct drm_client_funcs
 */

static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

	if (fb_helper->info) {
		drm_fb_helper_unregister_info(fb_helper);
	} else {
		drm_client_release(&fb_helper->client);
		drm_fb_helper_unprepare(fb_helper);
		kfree(fb_helper);
	}
}

static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
{
	drm_fb_helper_lastclose(client->dev);

	return 0;
}

static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;

	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;

	return 0;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
	return ret;
}

static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_dma_client_unregister,
	.restore	= drm_fbdev_dma_client_restore,
	.hotplug	= drm_fbdev_dma_client_hotplug,
};

/**
 * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device.
 *                 32 is used if this is zero.
 *
 * This function sets up fbdev emulation for GEM DMA drivers that support
 * dumb buffers with a virtual address and that can be mmap'ed.
 * drm_fbdev_dma_setup() shall be called after the DRM driver registered
 * the new DRM device with drm_dev_register().
 *
 * Restore, hotplug events and teardown are all taken care of. Drivers that do
 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
 * Simple drivers might use drm_mode_config_helper_suspend().
 *
 * This function is safe to call even when there are no connectors present.
 * Setup will be retried on the next hotplug event.
 *
 * The fbdev is destroyed by drm_dev_unregister().
 */
void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
	struct drm_fb_helper *fb_helper;
	int ret;

	drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
	drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper)
		return;
	drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);

	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
	if (ret) {
		drm_err(dev, "Failed to register client: %d\n", ret);
		goto err_drm_client_init;
	}

	drm_client_register(&fb_helper->client);

	return;

err_drm_client_init:
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
EXPORT_SYMBOL(drm_fbdev_dma_setup);
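
The kernel-doc above fixes the calling convention for the v6.9-era interface: register the DRM device first, then call drm_fbdev_dma_setup(). A minimal, hedged sketch of that sequence follows; the function name is a placeholder and the surrounding driver setup is omitted.

/* Illustrative only -- my_register() stands in for the driver's registration path. */
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>

static int my_register(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* Prefer 32 bpp; passing 0 also falls back to 32 per the kernel-doc. */
	drm_fbdev_dma_setup(drm, 32);

	return 0;
}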