/*
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *          Dave Airlie
 */
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/fb.h>

#include "cirrus_drv.h"

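/*
 * Copy a damaged rectangle from the vmalloc'ed shadow buffer
 * (afbdev->sysram) into the VRAM backing object.  The rectangle is
 * merged with any damage recorded earlier; if the BO is busy or we
 * cannot sleep to reserve it, the merged rectangle is stored and the
 * copy is deferred to a later update.
 */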
static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
			     int x, int y, int width, int height)
{
	int i;
	struct drm_gem_object *obj;
	struct cirrus_bo *bo;
	int src_offset, dst_offset;
	int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
	int ret = -EBUSY;
	bool unmap = false;
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;

	obj = afbdev->gfb.obj;
	bo = gem_to_cirrus_bo(obj);

	/*
	 * try and reserve the BO, if we fail with busy
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
	if (drm_can_sleep())
		ret = cirrus_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
			return;
		store_for_later = true;
	}

	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&afbdev->dirty_lock, flags);

	if (afbdev->y1 < y)
		y = afbdev->y1;
	if (afbdev->y2 > y2)
		y2 = afbdev->y2;
	if (afbdev->x1 < x)
		x = afbdev->x1;
	if (afbdev->x2 > x2)
		x2 = afbdev->x2;

	if (store_for_later) {
		afbdev->x1 = x;
		afbdev->x2 = x2;
		afbdev->y1 = y;
		afbdev->y2 = y2;
		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
		return;
	}

	afbdev->x1 = afbdev->y1 = INT_MAX;
	afbdev->x2 = afbdev->y2 = 0;
	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);

	if (!bo->kmap.virtual) {
		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
		if (ret) {
			DRM_ERROR("failed to kmap fb updates\n");
			cirrus_bo_unreserve(bo);
			return;
		}
		unmap = true;
	}
	for (i = y; i < y + height; i++) {
		/* assume equal stride for now */
		src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
	}
	if (unmap)
		ttm_bo_kunmap(&bo->kmap);

	cirrus_bo_unreserve(bo);
}

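/*
 * fbdev drawing entry points: draw into the shadow buffer with the
 * generic sys_* helpers, then flush the touched rectangle to VRAM.
 */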
static void cirrus_fillrect(struct fb_info *info,
			 const struct fb_fillrect *rect)
{
	struct cirrus_fbdev *afbdev = info->par;
	sys_fillrect(info, rect);
	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
			 rect->height);
}

static void cirrus_copyarea(struct fb_info *info,
			 const struct fb_copyarea *area)
{
	struct cirrus_fbdev *afbdev = info->par;
	sys_copyarea(info, area);
	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
			 area->height);
}

static void cirrus_imageblit(struct fb_info *info,
			  const struct fb_image *image)
{
	struct cirrus_fbdev *afbdev = info->par;
	sys_imageblit(info, image);
	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
			 image->height);
}

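/*
 * fb_ops for the emulated fbdev; mode handling is delegated to the
 * generic drm_fb_helper callbacks.
 */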
static struct fb_ops cirrusfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cirrus_fillrect,
	.fb_copyarea = cirrus_copyarea,
	.fb_imageblit = cirrus_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

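/*
 * Allocate the GEM object that backs the fbcon framebuffer.  Pixel
 * formats wider than 24 bpp are rejected.
 */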
static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
			       struct drm_mode_fb_cmd2 *mode_cmd,
			       struct drm_gem_object **gobj_p)
{
	struct drm_device *dev = afbdev->helper.dev;
	u32 bpp, depth;
	u32 size;
	struct drm_gem_object *gobj;

	int ret = 0;
	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);

	if (bpp > 24)
		return -EINVAL;
	size = mode_cmd->pitches[0] * mode_cmd->height;
	ret = cirrus_gem_create(dev, size, true, &gobj);
	if (ret)
		return ret;

	*gobj_p = gobj;
	return ret;
}

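/*
 * .fb_probe callback: allocate the shadow buffer and the VRAM backing
 * object, initialize the cirrus_framebuffer and fill in the fb_info
 * used by fbcon.
 */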
static int cirrusfb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
	struct drm_device *dev = gfbdev->helper.dev;
	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct device *device = &dev->pdev->dev;
	void *sysram;
	struct drm_gem_object *gobj = NULL;
	struct cirrus_bo *bo = NULL;
	int size, ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	bo = gem_to_cirrus_bo(gobj);

	sysram = vmalloc(size);
	if (!sysram)
		return -ENOMEM;

	info = framebuffer_alloc(0, device);
	if (info == NULL)
		return -ENOMEM;

	info->par = gfbdev;

	ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
	if (ret)
		return ret;

	gfbdev->sysram = sysram;
	gfbdev->size = size;

	fb = &gfbdev->gfb.base;
	if (!fb) {
		DRM_INFO("fb is NULL\n");
		return -EINVAL;
	}

	/* setup helper */
	gfbdev->helper.fb = fb;
	gfbdev->helper.fbdev = info;

	strcpy(info->fix.id, "cirrusdrmfb");


	info->flags = FBINFO_DEFAULT;
	info->fbops = &cirrusfb_ops;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_iounmap;
	}
	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
	info->apertures->ranges[0].size = cdev->mc.vram_size;

	info->fix.smem_start = cdev->dev->mode_config.fb_base;
	info->fix.smem_len = cdev->mc.vram_size;

	info->screen_base = sysram;
	info->screen_size = size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
		ret = -ENOMEM;
		goto out_iounmap;
	}

	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
	DRM_INFO("fb depth is %d\n", fb->depth);
	DRM_INFO("   pitch is %d\n", fb->pitches[0]);

	return 0;
out_iounmap:
	return ret;
}

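/*
 * Tear down the fbdev emulation: unregister the fb_info, drop the GEM
 * object reference, free the shadow buffer and clean up the helper and
 * framebuffer state.
 */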
static int cirrus_fbdev_destroy(struct drm_device *dev,
				struct cirrus_fbdev *gfbdev)
{
	struct fb_info *info;
	struct cirrus_framebuffer *gfb = &gfbdev->gfb;

	if (gfbdev->helper.fbdev) {
		info = gfbdev->helper.fbdev;

		unregister_framebuffer(info);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	if (gfb->obj) {
		drm_gem_object_unreference_unlocked(gfb->obj);
		gfb->obj = NULL;
	}

	vfree(gfbdev->sysram);
	drm_fb_helper_fini(&gfbdev->helper);
	drm_framebuffer_unregister_private(&gfb->base);
	drm_framebuffer_cleanup(&gfb->base);

	return 0;
}

static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
	.gamma_set = cirrus_crtc_fb_gamma_set,
	.gamma_get = cirrus_crtc_fb_gamma_get,
	.fb_probe = cirrusfb_create,
};

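/*
 * Set up the fbdev emulation at driver load: allocate the
 * cirrus_fbdev, register it with the fb helper and create an initial
 * 24 bpp configuration.
 */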
int cirrus_fbdev_init(struct cirrus_device *cdev)
{
	struct cirrus_fbdev *gfbdev;
	int ret;
	int bpp_sel = 24;

	/*bpp_sel = 8;*/
	gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
	if (!gfbdev)
		return -ENOMEM;

	cdev->mode_info.gfbdev = gfbdev;
	gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
	spin_lock_init(&gfbdev->dirty_lock);

	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
	if (ret) {
		kfree(gfbdev);
		return ret;
	}
	drm_fb_helper_single_add_all_connectors(&gfbdev->helper);

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(cdev->dev);
	drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);

	return 0;
}

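/* Tear down the fbdev emulation state at driver unload. */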
void cirrus_fbdev_fini(struct cirrus_device *cdev)
{
	if (!cdev->mode_info.gfbdev)
		return;

	cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
	kfree(cdev->mode_info.gfbdev);
	cdev->mode_info.gfbdev = NULL;
}