/*
 * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>

#include "omap_drv.h"

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

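/*
 * Pin the buffer so the importing device sees a single physically contiguous
 * region, and describe it to the importer with a one-entry scatterlist.
 */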
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t paddr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* camera, etc, need physically contiguous.. but we need a
	 * better way to know this..
	 */
	ret = omap_gem_get_paddr(obj, &paddr, true);
	if (ret)
		goto out;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
	sg_dma_address(sg->sgl) = paddr;

	/* this should be after _get_paddr() to ensure we have pages attached */
	omap_gem_dma_sync(obj, dir);

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}

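/* Undo omap_gem_map_dma_buf(): unpin the buffer and free the scatterlist. */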
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	omap_gem_put_paddr(obj);
	sg_free_table(sg);
	kfree(sg);
}

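/* Called when the last reference to the exported dma-buf is dropped. */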
static void omap_gem_dmabuf_release(struct dma_buf *buffer)
{
	struct drm_gem_object *obj = buffer->priv;
	/* release reference that was taken when dmabuf was exported
	 * in omap_gem_prime_set()..
	 */
	drm_gem_object_unreference_unlocked(obj);
}

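/*
 * Pin the backing pages before the importer touches the buffer with the CPU.
 * Tiled buffers are rejected because no de-tiled CPU view is implemented.
 */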
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
		/* TODO we would need to pin at least part of the buffer to
		 * get de-tiled view. For now just reject it.
		 */
		return -ENOMEM;
	}
	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

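/* Drop the page reference taken in omap_gem_dmabuf_begin_cpu_access(). */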
static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	omap_gem_put_pages(obj);
	return 0;
}

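/*
 * Atomic per-page kernel mapping: make sure the pages are present, sync the
 * requested page for CPU access, then kmap_atomic() it.
 */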
static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync(obj, page_num);
	return kmap_atomic(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	kunmap_atomic(addr);
}

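/* Non-atomic per-page mapping, same pattern as the atomic variant above. */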
static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync(obj, page_num);
	return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	kunmap(pages[page_num]);
}

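/*
 * Map the buffer into an importer's address space: set up the VMA through the
 * core GEM helper, then apply the omapdrm-specific mapping setup.
 */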
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret = 0;

	if (WARN_ON(!obj->filp))
		return -EINVAL;

	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

static struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = omap_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.kmap_atomic = omap_gem_dmabuf_kmap_atomic,
	.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
	.kmap = omap_gem_dmabuf_kmap,
	.kunmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};

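/*
 * Export a GEM object as a dma-buf. The object is stashed in the dma-buf's
 * priv field so the ops above can retrieve it.
 */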
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return dma_buf_export(&exp_info);
}

/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

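/*
 * Import a dma-buf as a GEM object. Self-imports (a dma-buf we exported
 * ourselves) are short-circuited to the existing GEM object; foreign buffers
 * are attached, mapped and wrapped in a new GEM object.
 */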
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>

#include <drm/drm_prime.h>

#include "omap_drv.h"

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

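/*
 * Pin the buffer with omap_gem_pin() so it is physically contiguous, then
 * describe it to the importer as a single-entry scatterlist.
 */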
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t dma_addr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* camera, etc, need physically contiguous.. but we need a
	 * better way to know this..
	 */
	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		goto out;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
	sg_dma_address(sg->sgl) = dma_addr;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}

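/* Counterpart of omap_gem_map_dma_buf(): unpin and free the scatterlist. */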
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	omap_gem_unpin(obj);
	sg_free_table(sg);
	kfree(sg);
}

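/*
 * Pin the backing pages for CPU access by the importer; tiled buffers are
 * rejected since no de-tiled CPU view is available here.
 */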
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
		/* TODO we would need to pin at least part of the buffer to
		 * get de-tiled view. For now just reject it.
		 */
		return -ENOMEM;
	}
	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

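/* Drop the page reference taken in the begin_cpu_access hook. */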
static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	omap_gem_put_pages(obj);
	return 0;
}

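/* Per-page map/unmap hooks: sync a single page for the CPU and kmap() it. */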
static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync_page(obj, page_num);
	return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	kunmap(pages[page_num]);
}

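/* mmap hook: set up userspace mappings through the normal GEM mmap path. */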
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret = 0;

	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.map = omap_gem_dmabuf_kmap,
	.unmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};

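/* Create a dma-buf for the given GEM object using the ops table above. */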
struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

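/*
 * Import a dma-buf: reuse the underlying GEM object for self-imports,
 * otherwise attach to the buffer and wrap its sg_table in a new GEM object.
 */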
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}