// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/fb.h>

#include <drm/drm_drv.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);

/*
 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
 */

FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(msm_fbdev,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area)

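/*
 * Forward mmap of the fbdev device to the GEM object backing the
 * framebuffer, via the PRIME mmap helper, so userspace maps the same
 * pages the console writes to.
 */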
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
	struct drm_gem_object *bo = msm_framebuffer_bo(helper->fb, 0);

	return drm_gem_prime_mmap(bo, vma);
}

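/*
 * Called when the last reference to the fb_info is dropped: drop the
 * kernel vmap of the backing GEM object, remove the framebuffer, and
 * release the DRM client and fb_helper state set up at probe time.
 */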
static void msm_fbdev_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
	struct drm_framebuffer *fb = helper->fb;
	struct drm_gem_object *bo = msm_framebuffer_bo(fb, 0);

	DBG();

	drm_fb_helper_fini(helper);

	/* this will free the backing object */
	msm_gem_put_vaddr(bo);
	drm_framebuffer_remove(fb);

	drm_client_release(&helper->client);
	drm_fb_helper_unprepare(helper);
	kfree(helper);
}

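/*
 * fb_ops built from the generic deferred-I/O helpers for system-memory
 * framebuffers: reads, writes and drawing ops touch the vmapped GEM
 * buffer and record damage, which is later flushed through
 * msm_fbdev_fb_dirty().
 */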
static const struct fb_ops msm_fb_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(msm_fbdev),
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(msm_fbdev),
	.fb_mmap = msm_fbdev_mmap,
	.fb_destroy = msm_fbdev_fb_destroy,
};

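/* Flush accumulated damage to the framebuffer's dirty handler, if it has one. */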
static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper,
			      struct drm_clip_rect *clip)
{
	struct drm_device *dev = helper->dev;
	int ret;

	/* Call damage handlers only if necessary */
	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
		return 0;

	if (helper->fb->funcs->dirty) {
		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
			return ret;
	}

	return 0;
}

static const struct drm_fb_helper_funcs msm_fbdev_helper_funcs = {
	.fb_dirty = msm_fbdev_fb_dirty,
};

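/*
 * Allocate a framebuffer for the emulated console (preferring stolen
 * memory set up by the bootloader), pin it in the KMS address space,
 * vmap it and expose it through a newly allocated fb_info.
 */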
int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
				 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = helper->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb = NULL;
	struct drm_gem_object *bo;
	struct fb_info *fbi = NULL;
	uint64_t paddr;
	uint32_t format;
	int ret, pitch;

	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);

	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
	    sizes->surface_height, sizes->surface_bpp,
	    sizes->fb_width, sizes->fb_height);

	pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
	fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
				 sizes->surface_height, pitch, format);

	if (IS_ERR(fb)) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
		return PTR_ERR(fb);
	}

	bo = msm_framebuffer_bo(fb, 0);

	/*
	 * NOTE: if we can be guaranteed to be able to map buffer
	 * in panic (ie. lock-safe, etc) we could avoid pinning the
	 * buffer now:
	 */
	ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
		goto fail;
	}

	fbi = drm_fb_helper_alloc_info(helper);
	if (IS_ERR(fbi)) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
		ret = PTR_ERR(fbi);
		goto fail;
	}

	DBG("fbi=%p, dev=%p", fbi, dev);

	helper->funcs = &msm_fbdev_helper_funcs;
	helper->fb = fb;

	fbi->fbops = &msm_fb_ops;

	drm_fb_helper_fill_info(fbi, helper, sizes);

	fbi->screen_buffer = msm_gem_get_vaddr(bo);
	if (IS_ERR(fbi->screen_buffer)) {
		ret = PTR_ERR(fbi->screen_buffer);
		goto fail;
	}
	fbi->screen_size = bo->size;
	fbi->fix.smem_start = paddr;
	fbi->fix.smem_len = bo->size;

	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
	DBG("allocated %dx%d fb", fb->width, fb->height);

	return 0;

fail:
	drm_framebuffer_remove(fb);
	return ret;
}
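
/*
 * What follows is an earlier revision of this file, from before the
 * conversion to the drm_client-based fbdev emulation: it still uses the
 * .fb_probe hook plus explicit msm_fbdev_init()/msm_fbdev_free() entry
 * points.
 */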
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>

#include "msm_drv.h"
#include "msm_kms.h"

extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
			    struct vm_area_struct *vma);
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);

/*
 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
 */

#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)

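/* Per-device fbdev state: the fb_helper base plus the framebuffer allocated for it. */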
struct msm_fbdev {
	struct drm_fb_helper base;
	struct drm_framebuffer *fb;
};

static struct fb_ops msm_fb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,

	/* Note: to properly handle manual update displays, we wrap the
	 * basic fbdev ops which write to the framebuffer
	 */
	.fb_read = drm_fb_helper_sys_read,
	.fb_write = drm_fb_helper_sys_write,
	.fb_fillrect = drm_fb_helper_sys_fillrect,
	.fb_copyarea = drm_fb_helper_sys_copyarea,
	.fb_imageblit = drm_fb_helper_sys_imageblit,
	.fb_mmap = msm_fbdev_mmap,
};

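/*
 * Map the GEM buffer backing the framebuffer into userspace: set up the
 * VMA with the core GEM helper, then apply msm-specific attributes
 * (such as the buffer's caching mode) via msm_gem_mmap_obj().
 */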
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
	struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
	int ret = 0;

	ret = drm_gem_mmap_obj(bo, bo->size, vma);
	if (ret) {
		pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
		return ret;
	}

	return msm_gem_mmap_obj(bo, vma);
}

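/*
 * .fb_probe callback: allocate the framebuffer (preferring stolen
 * memory set up by the bootloader), pin it in the KMS address space,
 * vmap it for the console and fill in the fb_info.
 */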
static int msm_fbdev_create(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
	struct drm_device *dev = helper->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb = NULL;
	struct drm_gem_object *bo;
	struct fb_info *fbi = NULL;
	uint64_t paddr;
	uint32_t format;
	int ret, pitch;

	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);

	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
	    sizes->surface_height, sizes->surface_bpp,
	    sizes->fb_width, sizes->fb_height);

	pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
	fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
				 sizes->surface_height, pitch, format);

	if (IS_ERR(fb)) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
		return PTR_ERR(fb);
	}

	bo = msm_framebuffer_bo(fb, 0);

	mutex_lock(&dev->struct_mutex);

	/*
	 * NOTE: if we can be guaranteed to be able to map buffer
	 * in panic (ie. lock-safe, etc) we could avoid pinning the
	 * buffer now:
	 */
	ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
		goto fail_unlock;
	}

	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi)) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
		ret = PTR_ERR(fbi);
		goto fail_unlock;
	}

	DBG("fbi=%p, dev=%p", fbi, dev);

	fbdev->fb = fb;
	helper->fb = fb;

	fbi->fbops = &msm_fb_ops;

	drm_fb_helper_fill_info(fbi, helper, sizes);

	dev->mode_config.fb_base = paddr;

	fbi->screen_base = msm_gem_get_vaddr(bo);
	if (IS_ERR(fbi->screen_base)) {
		ret = PTR_ERR(fbi->screen_base);
		goto fail_unlock;
	}
	fbi->screen_size = bo->size;
	fbi->fix.smem_start = paddr;
	fbi->fix.smem_len = bo->size;

	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);

	mutex_unlock(&dev->struct_mutex);

	return 0;

fail_unlock:
	mutex_unlock(&dev->struct_mutex);
	drm_framebuffer_remove(fb);
	return ret;
}

static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
	.fb_probe = msm_fbdev_create,
};

/* initialize fbdev helper */
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_fbdev *fbdev = NULL;
	struct drm_fb_helper *helper;
	int ret;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		goto fail;

	helper = &fbdev->base;

	drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
		goto fail;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret)
		goto fini;

	/* the fw fb could be anywhere in memory */
	drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false);

	ret = drm_fb_helper_initial_config(helper, 32);
	if (ret)
		goto fini;

	priv->fbdev = helper;

	return helper;

fini:
	drm_fb_helper_fini(helper);
fail:
	kfree(fbdev);
	return NULL;
}

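/*
 * Tear down fbdev emulation at driver unload: unregister and finalize
 * the fb_helper, drop the vmap of the backing GEM object and remove the
 * framebuffer allocated in msm_fbdev_create().
 */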
void msm_fbdev_free(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_fb_helper *helper = priv->fbdev;
	struct msm_fbdev *fbdev;

	DBG();

	drm_fb_helper_unregister_fbi(helper);

	drm_fb_helper_fini(helper);

	fbdev = to_msm_fbdev(priv->fbdev);

	/* this will free the backing object */
	if (fbdev->fb) {
		struct drm_gem_object *bo =
			msm_framebuffer_bo(fbdev->fb, 0);
		msm_gem_put_vaddr(bo);
		drm_framebuffer_remove(fbdev->fb);
	}

	kfree(fbdev);

	priv->fbdev = NULL;
}