// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_dma_buf.h"

#include <kunit/test.h>
#include <linux/dma-buf.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_device.h>
#include <drm/drm_prime.h>
#include <drm/ttm/ttm_tt.h>

#include "tests/xe_test.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_pm.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"

MODULE_IMPORT_NS("DMA_BUF");

static int xe_dma_buf_attach(struct dma_buf *dmabuf,
                             struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;

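        /*
         * Downgrade to a plain system-memory attachment if PCIe
         * peer-to-peer DMA between the exporting device and the importer
         * is not possible.
         */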
        if (attach->peer2peer &&
            pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
                attach->peer2peer = false;

        if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
                return -EOPNOTSUPP;

        xe_pm_runtime_get(to_xe_device(obj->dev));
        return 0;
}

static void xe_dma_buf_detach(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;

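        /* Drop the runtime PM reference taken in xe_dma_buf_attach(). */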
        xe_pm_runtime_put(to_xe_device(obj->dev));
}

static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct xe_bo *bo = gem_to_xe_bo(obj);
        struct xe_device *xe = xe_bo_device(bo);
        int ret;

        /*
         * For now only support pinning in TT memory, for two reasons:
         * 1) Avoid pinning in a placement not accessible to some importers.
         * 2) Pinning in VRAM requires PIN accounting which is a to-do.
         */
        if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
                drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
                return -EINVAL;
        }

        ret = xe_bo_migrate(bo, XE_PL_TT);
        if (ret) {
                if (ret != -EINTR && ret != -ERESTARTSYS)
                        drm_dbg(&xe->drm,
                                "Failed migrating dma-buf to TT memory: %pe\n",
                                ERR_PTR(ret));
                return ret;
        }

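        /*
         * Pinning is not expected to fail once the bo is resident in TT,
         * hence the assert rather than an error unwind.
         */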
        ret = xe_bo_pin_external(bo);
        xe_assert(xe, !ret);

        return 0;
}

static void xe_dma_buf_unpin(struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct xe_bo *bo = gem_to_xe_bo(obj);

        xe_bo_unpin_external(bo);
}

static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
                                       enum dma_data_direction dir)
{
        struct dma_buf *dma_buf = attach->dmabuf;
        struct drm_gem_object *obj = dma_buf->priv;
        struct xe_bo *bo = gem_to_xe_bo(obj);
        struct sg_table *sgt;
        int r = 0;

        if (!attach->peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
                return ERR_PTR(-EOPNOTSUPP);

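        /*
         * Unpinned (dynamic) attachments need the bo made resident here:
         * non-p2p importers require system memory (TT), while p2p importers
         * can map whatever placement validation ends up picking.
         */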
        if (!xe_bo_is_pinned(bo)) {
                if (!attach->peer2peer)
                        r = xe_bo_migrate(bo, XE_PL_TT);
                else
                        r = xe_bo_validate(bo, NULL, false);
                if (r)
                        return ERR_PTR(r);
        }

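        /* Build an sg_table that matches the bo's current placement. */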
        switch (bo->ttm.resource->mem_type) {
        case XE_PL_TT:
                sgt = drm_prime_pages_to_sg(obj->dev,
                                            bo->ttm.ttm->pages,
                                            bo->ttm.ttm->num_pages);
                if (IS_ERR(sgt))
                        return sgt;

                if (dma_map_sgtable(attach->dev, sgt, dir,
                                    DMA_ATTR_SKIP_CPU_SYNC))
                        goto error_free;
                break;

        case XE_PL_VRAM0:
        case XE_PL_VRAM1:
                r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
                                              bo->ttm.resource, 0,
                                              bo->ttm.base.size, attach->dev,
                                              dir, &sgt);
                if (r)
                        return ERR_PTR(r);
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        return sgt;

error_free:
        sg_free_table(sgt);
        kfree(sgt);
        return ERR_PTR(-EBUSY);
}

static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
                             struct sg_table *sgt,
                             enum dma_data_direction dir)
{
        struct dma_buf *dma_buf = attach->dmabuf;
        struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);

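        /*
         * System-memory mappings are torn down directly; VRAM mappings were
         * built by the VRAM manager and are returned to it.
         */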
        if (!xe_bo_is_vram(bo)) {
                dma_unmap_sgtable(attach->dev, sgt, dir, 0);
                sg_free_table(sgt);
                kfree(sgt);
        } else {
                xe_ttm_vram_mgr_free_sgt(attach->dev, dir, sgt);
        }
}

static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                                       enum dma_data_direction direction)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct xe_bo *bo = gem_to_xe_bo(obj);
        bool reads = (direction == DMA_BIDIRECTIONAL ||
                      direction == DMA_FROM_DEVICE);

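        /*
         * Only CPU reads need the bo migrated to system memory; there is
         * nothing to do for write-only CPU access.
         */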
        if (!reads)
                return 0;

        /* Can we do interruptible lock here? */
        xe_bo_lock(bo, false);
        (void)xe_bo_migrate(bo, XE_PL_TT);
        xe_bo_unlock(bo);

        return 0;
}

static const struct dma_buf_ops xe_dmabuf_ops = {
        .attach = xe_dma_buf_attach,
        .detach = xe_dma_buf_detach,
        .pin = xe_dma_buf_pin,
        .unpin = xe_dma_buf_unpin,
        .map_dma_buf = xe_dma_buf_map,
        .unmap_dma_buf = xe_dma_buf_unmap,
        .release = drm_gem_dmabuf_release,
        .begin_cpu_access = xe_dma_buf_begin_cpu_access,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
{
        struct xe_bo *bo = gem_to_xe_bo(obj);
        struct dma_buf *buf;

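        /* Bos tied to a private VM share its reservation object and cannot be exported. */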
        if (bo->vm)
                return ERR_PTR(-EPERM);

        buf = drm_gem_prime_export(obj, flags);
        if (!IS_ERR(buf))
                buf->ops = &xe_dmabuf_ops;

        return buf;
}

static struct drm_gem_object *
xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
                    struct dma_buf *dma_buf)
{
        struct dma_resv *resv = dma_buf->resv;
        struct xe_device *xe = to_xe_device(dev);
        struct xe_bo *bo;
        int ret;

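        /*
         * Create the imported bo on top of the exporter's reservation
         * object, so importer and exporter always serialize on the same
         * lock.
         */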
        dma_resv_lock(resv, NULL);
        bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
                                    0, /* Will require 1way or 2way for vm_bind */
                                    ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
        if (IS_ERR(bo)) {
                ret = PTR_ERR(bo);
                goto error;
        }
        dma_resv_unlock(resv);

        return &bo->ttm.base;

error:
        dma_resv_unlock(resv);
        return ERR_PTR(ret);
}

static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->importer_priv;
        struct xe_bo *bo = gem_to_xe_bo(obj);

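        /*
         * The exporter is about to move the backing storage; evict the
         * imported bo so any GPU mappings of it are torn down before the
         * move and rebuilt on the next validation.
         */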
        XE_WARN_ON(xe_bo_evict(bo, false));
}

static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
        .allow_peer2peer = true,
        .move_notify = xe_dma_buf_move_notify
};

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)

struct dma_buf_test_params {
        struct xe_test_priv base;
        const struct dma_buf_attach_ops *attach_ops;
        bool force_different_devices;
        u32 mem_mask;
};

#define to_dma_buf_test_params(_priv) \
        container_of(_priv, struct dma_buf_test_params, base)
#endif

struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
                                           struct dma_buf *dma_buf)
{
        XE_TEST_DECLARE(struct dma_buf_test_params *test =
                        to_dma_buf_test_params
                        (xe_cur_kunit_priv(XE_TEST_LIVE_DMA_BUF));)
        const struct dma_buf_attach_ops *attach_ops;
        struct dma_buf_attachment *attach;
        struct drm_gem_object *obj;
        struct xe_bo *bo;

        if (dma_buf->ops == &xe_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev &&
                    !XE_TEST_ONLY(test && test->force_different_devices)) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead
                         * of the f_count of the dmabuf.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        /*
         * Don't publish the bo until we have a valid attachment, and a
         * valid attachment needs the bo address. So pre-create a bo before
         * creating the attachment and publish it afterwards.
         */
        bo = xe_bo_alloc();
        if (IS_ERR(bo))
                return ERR_CAST(bo);

        attach_ops = &xe_dma_buf_attach_ops;
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
        if (test)
                attach_ops = test->attach_ops;
#endif

        attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);
        if (IS_ERR(attach)) {
                obj = ERR_CAST(attach);
                goto out_err;
        }

        /* Errors here will take care of freeing the bo. */
        obj = xe_dma_buf_init_obj(dev, bo, dma_buf);
        if (IS_ERR(obj))
                return obj;

        get_dma_buf(dma_buf);
        obj->import_attach = attach;
        return obj;

out_err:
        xe_bo_free(bo);

        return obj;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_dma_buf.c"
#endif