/*
 * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>

#include "omap_drv.h"

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t paddr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* Cameras, etc., need physically contiguous buffers, but we need a
	 * better way to know this.
	 */
	ret = omap_gem_get_paddr(obj, &paddr, true);
	if (ret)
		goto out;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
	sg_dma_address(sg->sgl) = paddr;

	/* this should be after _get_paddr() to ensure we have pages attached */
	omap_gem_dma_sync(obj, dir);

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}

static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	omap_gem_put_paddr(obj);
	sg_free_table(sg);
	kfree(sg);
}

static void omap_gem_dmabuf_release(struct dma_buf *buffer)
{
	struct drm_gem_object *obj = buffer->priv;
	/* release reference that was taken when dmabuf was exported
	 * in omap_gem_prime_set()..
	 */
	drm_gem_object_unreference_unlocked(obj);
}


static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
		/* TODO we would need to pin at least part of the buffer to
		 * get de-tiled view. For now just reject it.
		 */
		return -ENOMEM;
	}
	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	omap_gem_put_pages(obj);
	return 0;
}


static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync(obj, page_num);
	return kmap_atomic(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	kunmap_atomic(addr);
}

static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync(obj, page_num);
	return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	kunmap(pages[page_num]);
}

static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret = 0;

	if (WARN_ON(!obj->filp))
		return -EINVAL;

	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

static struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = omap_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.kmap_atomic = omap_gem_dmabuf_kmap_atomic,
	.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
	.kmap = omap_gem_dmabuf_kmap,
	.kunmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};

struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return dma_buf_export(&exp_info);
}
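
/*
 * Illustrative sketch, not part of this file: omap_gem_prime_export() is
 * normally reached from userspace through the generic PRIME ioctls, e.g. via
 * libdrm. The fd and handle names below are hypothetical.
 *
 *	int prime_fd;
 *	uint32_t handle;
 *
 *	// Export a GEM handle owned by this driver as a dma-buf fd.
 *	drmPrimeHandleToFD(omap_drm_fd, bo_handle, DRM_CLOEXEC, &prime_fd);
 *
 *	// prime_fd can be passed to another process or device; importing it
 *	// into a DRM device ends up in omap_gem_prime_import() below.
 *	drmPrimeFDToHandle(importer_drm_fd, prime_fd, &handle);
 */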

/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM object
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
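
/*
 * Illustrative sketch, not part of this file: what a kernel-side importer
 * (e.g. a V4L2 or display driver) typically does with a buffer exported
 * through omap_dmabuf_ops above. The attach/map calls end up invoking
 * omap_gem_map_dma_buf(); importer_dev and the DMA direction are hypothetical.
 *
 *	struct dma_buf *buf = dma_buf_get(prime_fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(buf, importer_dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(attach, DMA_FROM_DEVICE);
 *
 *	// ... program the importer's DMA engine from sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_FROM_DEVICE);
 *	dma_buf_detach(buf, attach);
 *	dma_buf_put(buf);
 */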
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>

#include "omap_drv.h"

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t dma_addr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* Cameras, etc., need physically contiguous buffers, but we need a
	 * better way to know this.
	 */
	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		goto out;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
	sg_dma_address(sg->sgl) = dma_addr;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}

static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	omap_gem_unpin(obj);
	sg_free_table(sg);
	kfree(sg);
}

static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
		/* TODO we would need to pin at least part of the buffer to
		 * get de-tiled view. For now just reject it.
		 */
		return -ENOMEM;
	}
	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	omap_gem_put_pages(obj);
	return 0;
}
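
/*
 * Illustrative sketch, not part of this file: begin/end_cpu_access above are
 * normally driven from userspace through the DMA_BUF_IOCTL_SYNC ioctl on the
 * exported fd, bracketing CPU access to the buffer. The fd name is
 * hypothetical.
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
 *	};
 *
 *	ioctl(prime_fd, DMA_BUF_IOCTL_SYNC, &sync);	// -> begin_cpu_access()
 *	// ... CPU reads/writes through a mapped view of the buffer ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(prime_fd, DMA_BUF_IOCTL_SYNC, &sync);	// -> end_cpu_access()
 */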


static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync_page(obj, page_num);
	return kmap_atomic(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	kunmap_atomic(addr);
}

static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync_page(obj, page_num);
	return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	kunmap(pages[page_num]);
}

static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret = 0;

	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.map_atomic = omap_gem_dmabuf_kmap_atomic,
	.unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
	.map = omap_gem_dmabuf_kmap,
	.unmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};

struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM object
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
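
/*
 * Illustrative sketch, not part of this file: userspace can also map the
 * exported fd directly, which lands in omap_gem_dmabuf_mmap() above. The fd
 * and size names are hypothetical; the size would normally come from the
 * original allocation.
 *
 *	void *ptr = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 prime_fd, 0);
 *	// ... bracket CPU access with DMA_BUF_IOCTL_SYNC as sketched earlier ...
 *	munmap(ptr, buf_size);
 */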