/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

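/*
 * Duplicate the object's scatterlist and DMA-map the copy for the importing
 * device, so each attachment gets an independent mapping. The backing pages
 * are pinned here and released again in i915_gem_unmap_dma_buf().
 */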
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

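/*
 * Undo i915_gem_map_dma_buf(): unmap and free the copied scatterlist and
 * drop the pin on the object's backing pages.
 */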
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}

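/*
 * Build a contiguous kernel virtual mapping of the object's pages for
 * dma_buf_vmap() callers. The mapping is reference counted and reused by
 * subsequent callers until the last vunmap drops it.
 */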
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

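/*
 * Map the exported buffer into userspace by forwarding the mmap to the
 * object's backing shmem file, then swap the vma's file reference over to
 * that backing file.
 */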
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

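/*
 * dma-buf CPU access: move the object to the CPU domain (flushing and
 * waiting as required) before the importer touches the pages.
 */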
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

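/*
 * Export a GEM object as a dma-buf, giving the object type a chance to veto
 * or prepare for the export via its ->dmabuf_export() hook.
 */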
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}

static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

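/*
 * Import a dma-buf as a GEM object. Re-imported i915 objects are simply
 * referenced again; foreign buffers are wrapped in a new GEM object whose
 * pages come from the dma-buf attachment.
 */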
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

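/*
 * Duplicate the object's scatterlist and DMA-map the copy for the importing
 * device, so each attachment gets an independent mapping. The backing pages
 * stay pinned until i915_gem_unmap_dma_buf() releases them.
 */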
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

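/* dma_buf_vmap(): reuse the object's own pinned kernel mapping of its pages */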
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_unpin_map(obj);
}

static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

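/*
 * Map a single page of the object for CPU access. Only objects backed by
 * struct pages can be kmapped; synchronisation with the GPU is left to the
 * caller via .begin_cpu_access().
 */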
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct page *page;

	if (page_num >= obj->base.size >> PAGE_SHIFT)
		return NULL;

	if (!i915_gem_object_has_struct_page(obj))
		return NULL;

	if (i915_gem_object_pin_pages(obj))
		return NULL;

	/* Synchronisation is left to the caller (via .begin_cpu_access()) */
	page = i915_gem_object_get_page(obj, page_num);
	if (IS_ERR(page))
		goto err_unpin;

	return kmap(page);

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	kunmap(virt_to_page(addr));
	i915_gem_object_unpin_pages(obj);
}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

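/*
 * dma-buf CPU access: pin the pages and move the object to the CPU domain
 * before the importer reads or writes them.
 */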
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = i915_gem_dmabuf_kmap,
	.map_atomic = i915_gem_dmabuf_kmap_atomic,
	.unmap = i915_gem_dmabuf_kunmap,
	.unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

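/*
 * Export a GEM object as a dma-buf, sharing the object's reservation object
 * so fences are visible to the importer, and letting the object type veto
 * the export via its ->dmabuf_export() hook.
 */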
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(dev, &exp_info);
}

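/*
 * get_pages for imported objects: the backing store is whatever scatterlist
 * the exporting device hands us through the dma-buf attachment.
 */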
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

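/*
 * Import a dma-buf as a GEM object. Re-importing our own exported objects
 * just takes another GEM reference; foreign buffers are wrapped in a new
 * GEM object that sources its pages from the dma-buf attachment and shares
 * its reservation object.
 */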
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(to_i915(dev));
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;
	obj->resv = dma_buf->resv;

	/*
	 * We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif