// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/fb.h>

#include <drm/drm_drv.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);

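/*
 * With the module parameter above, the compat layer can be turned off at
 * load time, e.g. "msm.fbdev=0" on the kernel command line or
 * "modprobe msm fbdev=0" when the driver is built as a module.
 */
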
/*
 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
 */

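/*
 * Generates the deferred-I/O read/write and drawing fb ops
 * (msm_fbdev_defio_*) that operate on the vmapped GEM buffer and report
 * touched regions via drm_fb_helper_damage_range()/_area(), so the
 * fb_dirty callback below can flush them to the display.
 */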
FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(msm_fbdev,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area)

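/* Map the framebuffer's backing GEM object into userspace via the PRIME mmap path. */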
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
	struct drm_gem_object *bo = msm_framebuffer_bo(helper->fb, 0);

	return drm_gem_prime_mmap(bo, vma);
}

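/*
 * Final teardown when the last fbdev reference is dropped: finalize the
 * helper, release the kernel vmap taken at probe time, remove the
 * framebuffer (freeing the backing GEM object), then dispose of the DRM
 * client and the helper allocation.
 */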
static void msm_fbdev_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
	struct drm_framebuffer *fb = helper->fb;
	struct drm_gem_object *bo = msm_framebuffer_bo(fb, 0);

	DBG();

	drm_fb_helper_fini(helper);

	/* this will free the backing object */
	msm_gem_put_vaddr(bo);
	drm_framebuffer_remove(fb);

	drm_client_release(&helper->client);
	drm_fb_helper_unprepare(helper);
	kfree(helper);
}

static const struct fb_ops msm_fb_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(msm_fbdev),
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(msm_fbdev),
	.fb_mmap = msm_fbdev_mmap,
	.fb_destroy = msm_fbdev_fb_destroy,
};

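/*
 * Flush callback for the fb_helper deferred-I/O worker: forwards the
 * accumulated damage clip to the framebuffer's ->dirty() hook so
 * manual-update panels (e.g. command-mode DSI) are refreshed.
 */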
static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper,
			      struct drm_clip_rect *clip)
{
	struct drm_device *dev = helper->dev;
	int ret;

	/* Call damage handlers only if necessary */
	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
		return 0;

	if (helper->fb->funcs->dirty) {
		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
			return ret;
	}

	return 0;
}

static const struct drm_fb_helper_funcs msm_fbdev_helper_funcs = {
	.fb_dirty = msm_fbdev_fb_dirty,
};

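/*
 * fbdev probe: allocate a framebuffer for the requested configuration,
 * pin its backing GEM object to get a scanout address, vmap it for CPU
 * access, and fill in the fb_info handed back to the fbdev core.
 */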
int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
				 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = helper->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb = NULL;
	struct drm_gem_object *bo;
	struct fb_info *fbi = NULL;
	uint64_t paddr;
	uint32_t format;
	int ret, pitch;

	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);

	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
	    sizes->surface_height, sizes->surface_bpp,
	    sizes->fb_width, sizes->fb_height);

	pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
	fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
				 sizes->surface_height, pitch, format);

	if (IS_ERR(fb)) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
		return PTR_ERR(fb);
	}

	bo = msm_framebuffer_bo(fb, 0);

	/*
	 * NOTE: if we can be guaranteed to be able to map buffer
	 * in panic (ie. lock-safe, etc) we could avoid pinning the
	 * buffer now:
	 */
	ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
		goto fail;
	}

	fbi = drm_fb_helper_alloc_info(helper);
	if (IS_ERR(fbi)) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
		ret = PTR_ERR(fbi);
		goto fail;
	}

	DBG("fbi=%p, dev=%p", fbi, dev);

	helper->funcs = &msm_fbdev_helper_funcs;
	helper->fb = fb;

	fbi->fbops = &msm_fb_ops;

	drm_fb_helper_fill_info(fbi, helper, sizes);

	fbi->screen_buffer = msm_gem_get_vaddr(bo);
	if (IS_ERR(fbi->screen_buffer)) {
		ret = PTR_ERR(fbi->screen_buffer);
		goto fail;
	}
	fbi->screen_size = bo->size;
	fbi->fix.smem_start = paddr;
	fbi->fix.smem_len = bo->size;

	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
	DBG("allocated %dx%d fb", fb->width, fb->height);

	return 0;

fail:
	drm_framebuffer_remove(fb);
	return ret;
}
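/*
 * The code below is an earlier revision of msm_fbdev.c, predating the
 * conversion to the generic fbdev helpers: it still uses the ->fb_probe
 * hook and explicit msm_fbdev_init()/msm_fbdev_free() entry points.
 */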
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_aperture.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
			    struct vm_area_struct *vma);
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);

/*
 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
 */

#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)

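/* Per-device fbdev state: the shared fb_helper plus the framebuffer wrapping the scanout BO. */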
struct msm_fbdev {
	struct drm_fb_helper base;
	struct drm_framebuffer *fb;
};

static const struct fb_ops msm_fb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,

	/* Note: to properly handle manual update displays, we wrap the
	 * basic fbdev ops which write to the framebuffer
	 */
	.fb_read = drm_fb_helper_sys_read,
	.fb_write = drm_fb_helper_sys_write,
	.fb_fillrect = drm_fb_helper_sys_fillrect,
	.fb_copyarea = drm_fb_helper_sys_copyarea,
	.fb_imageblit = drm_fb_helper_sys_imageblit,
	.fb_mmap = msm_fbdev_mmap,
};

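/*
 * Two-step mmap: drm_gem_mmap_obj() sets up the VMA for the GEM object,
 * then msm_gem_mmap_obj() applies the msm-specific page protection for
 * the buffer's caching mode.
 */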
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
	struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
	int ret = 0;

	ret = drm_gem_mmap_obj(bo, bo->size, vma);
	if (ret) {
		pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
		return ret;
	}

	return msm_gem_mmap_obj(bo, vma);
}

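/*
 * ->fb_probe implementation: allocate a framebuffer for the requested
 * configuration, then pin its backing GEM object for scanout, vmap it for
 * CPU access and populate the fb_info, with the pinning and mapping done
 * under dev->struct_mutex.
 */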
static int msm_fbdev_create(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
	struct drm_device *dev = helper->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb = NULL;
	struct drm_gem_object *bo;
	struct fb_info *fbi = NULL;
	uint64_t paddr;
	uint32_t format;
	int ret, pitch;

	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);

	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
	    sizes->surface_height, sizes->surface_bpp,
	    sizes->fb_width, sizes->fb_height);

	pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
	fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
				 sizes->surface_height, pitch, format);

	if (IS_ERR(fb)) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
		return PTR_ERR(fb);
	}

	bo = msm_framebuffer_bo(fb, 0);

	mutex_lock(&dev->struct_mutex);

	/*
	 * NOTE: if we can be guaranteed to be able to map buffer
	 * in panic (ie. lock-safe, etc) we could avoid pinning the
	 * buffer now:
	 */
	ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
		goto fail_unlock;
	}

	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi)) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
		ret = PTR_ERR(fbi);
		goto fail_unlock;
	}

	DBG("fbi=%p, dev=%p", fbi, dev);

	fbdev->fb = fb;
	helper->fb = fb;

	fbi->fbops = &msm_fb_ops;

	drm_fb_helper_fill_info(fbi, helper, sizes);

	dev->mode_config.fb_base = paddr;

	fbi->screen_base = msm_gem_get_vaddr(bo);
	if (IS_ERR(fbi->screen_base)) {
		ret = PTR_ERR(fbi->screen_base);
		goto fail_unlock;
	}
	fbi->screen_size = bo->size;
	fbi->fix.smem_start = paddr;
	fbi->fix.smem_len = bo->size;

	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);

	mutex_unlock(&dev->struct_mutex);

	return 0;

fail_unlock:
	mutex_unlock(&dev->struct_mutex);
	drm_framebuffer_remove(fb);
	return ret;
}

static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
	.fb_probe = msm_fbdev_create,
};

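/*
 * Driver-load entry point: set up the fb_helper, kick out any firmware
 * framebuffer that may still be scanning out from memory we are about to
 * take over, then create the initial 32bpp configuration.
 */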
/* initialize fbdev helper */
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_fbdev *fbdev = NULL;
	struct drm_fb_helper *helper;
	int ret;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		goto fail;

	helper = &fbdev->base;

	drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, helper);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
		goto fail;
	}

	/* the fw fb could be anywhere in memory */
	ret = drm_aperture_remove_framebuffers(false, "msm");
	if (ret)
		goto fini;

	ret = drm_fb_helper_initial_config(helper, 32);
	if (ret)
		goto fini;

	priv->fbdev = helper;

	return helper;

fini:
	drm_fb_helper_fini(helper);
fail:
	kfree(fbdev);
	return NULL;
}

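/*
 * Driver-unload counterpart of msm_fbdev_init(): unregister the fb_info,
 * finalize the helper, release the kernel vmap and the framebuffer, and
 * free the msm_fbdev wrapper.
 */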
void msm_fbdev_free(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_fb_helper *helper = priv->fbdev;
	struct msm_fbdev *fbdev;

	DBG();

	drm_fb_helper_unregister_fbi(helper);

	drm_fb_helper_fini(helper);

	fbdev = to_msm_fbdev(priv->fbdev);

	/* this will free the backing object */
	if (fbdev->fb) {
		struct drm_gem_object *bo =
			msm_framebuffer_bo(fbdev->fb, 0);
		msm_gem_put_vaddr(bo);
		drm_framebuffer_remove(fbdev->fb);
	}

	kfree(fbdev);

	priv->fbdev = NULL;
}