/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

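/*
 * Recover the i915 GEM object backing an exported dma-buf. The exporter
 * stores the GEM object in dma_buf.priv at export time (see
 * i915_gem_prime_export() below), so this is a simple downcast.
 */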
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

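/*
 * Map the object's backing store for DMA by the importing device: pin the
 * pages, duplicate our scatterlist so the importer gets an independent
 * mapping, then DMA-map the copy for the attached device.
 * DMA_ATTR_SKIP_CPU_SYNC is used since coherency is handled explicitly
 * through the begin/end_cpu_access hooks below.
 */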
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg_attrs(attachment->dev,
			      st->sgl, st->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

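/*
 * Undo i915_gem_map_dma_buf(): DMA-unmap the importer's copy of the
 * scatterlist, free it, and drop the page pin taken at map time.
 */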
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg_attrs(attachment->dev,
			   sg->sgl, sg->nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

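/*
 * Provide a kernel virtual address covering the whole object. The mapping
 * is write-back cached; the matching vunmap flushes it before dropping the
 * pin so that CPU writes made through the vmap become visible.
 */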
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

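/*
 * Userspace mmap of the dma-buf is forwarded to the file backing the
 * object. Once call_mmap() succeeds, vma->vm_file is swapped for the
 * backing file so that subsequent vm operations hit the right file.
 */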
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

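/*
 * Prepare the object for CPU access by the importer: move it to the CPU
 * domain, flushing caches as needed. CPU writes are expected for
 * DMA_BIDIRECTIONAL and DMA_TO_DEVICE (the CPU producing data for the
 * device).
 */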
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

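/*
 * Finish a CPU access window: move the object back to the GTT domain
 * (read-only, hence the false argument), flushing out any CPU writes so
 * the device sees coherent data again.
 */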
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

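/*
 * Exporter callbacks handed to the dma-buf core for every buffer that
 * i915 exports; release is delegated to the DRM core helper.
 */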
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

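/*
 * Export a GEM object as a dma-buf. The object's reservation object is
 * shared with the dma-buf so fences are common to both, and backends
 * that need to prepare for (or veto) export get a say through the
 * optional dmabuf_export hook.
 */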
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

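/*
 * get_pages backend for imported dma-bufs: the "pages" are whatever
 * scatterlist the exporter hands us through our import attachment.
 */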
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.name = "i915_gem_object_dmabuf",
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

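/*
 * Import a dma-buf as a GEM object. If the dma-buf is one of our own
 * exports on the same device we short-circuit and take a reference on
 * the underlying GEM object; otherwise we attach to the foreign exporter
 * and wrap the attachment in a new, pageless GEM object.
 */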
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM
			 * object increases the refcount on the GEM object
			 * itself instead of the dmabuf's f_count.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif