// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_dma_buf.h"

#include <kunit/test.h>
#include <linux/dma-buf.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_device.h>
#include <drm/drm_prime.h>
#include <drm/ttm/ttm_tt.h>

#include "tests/xe_test.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"

MODULE_IMPORT_NS(DMA_BUF);

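/*
 * Attach callback. If the importer claims peer2peer support, verify that it
 * can actually reach this device over PCIe P2P and clear the flag if not.
 * Importers without P2P support can only be served if the BO is allowed to
 * migrate to system (TT) memory. The mem_access reference taken here is
 * dropped again in xe_dma_buf_detach().
 */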
static int xe_dma_buf_attach(struct dma_buf *dmabuf,
			     struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	if (attach->peer2peer &&
	    pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
		attach->peer2peer = false;

	if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
		return -EOPNOTSUPP;

	xe_device_mem_access_get(to_xe_device(obj->dev));
	return 0;
}

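/* Detach callback: drop the mem_access reference taken in xe_dma_buf_attach(). */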
static void xe_dma_buf_detach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	xe_device_mem_access_put(to_xe_device(obj->dev));
}

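/*
 * Pin callback for non-dynamic importers: migrate the BO to system (TT)
 * memory and pin it there. Pinning in VRAM is intentionally not supported,
 * see the comment in the function body.
 */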
static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = xe_bo_device(bo);
	int ret;

	/*
	 * For now only support pinning in TT memory, for two reasons:
	 * 1) Avoid pinning in a placement not accessible to some importers.
	 * 2) Pinning in VRAM requires PIN accounting which is a to-do.
	 */
	if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
		drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
		return -EINVAL;
	}

	ret = xe_bo_migrate(bo, XE_PL_TT);
	if (ret) {
		if (ret != -EINTR && ret != -ERESTARTSYS)
			drm_dbg(&xe->drm,
				"Failed migrating dma-buf to TT memory: %pe\n",
				ERR_PTR(ret));
		return ret;
	}

	ret = xe_bo_pin_external(bo);
	xe_assert(xe, !ret);

	return 0;
}

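/* Unpin callback: drop the pin taken in xe_dma_buf_pin(). */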
static void xe_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	xe_bo_unpin_external(bo);
}

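/*
 * map_dma_buf callback: ensure the BO is in a placement the importer can use
 * and build an sg_table for it. TT (system) placements are mapped through the
 * importer's DMA device; VRAM placements are handed out as a peer-to-peer
 * sg_table built by the VRAM manager.
 */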
static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct sg_table *sgt;
	int r = 0;

	if (!attach->peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
		return ERR_PTR(-EOPNOTSUPP);

	if (!xe_bo_is_pinned(bo)) {
		if (!attach->peer2peer)
			r = xe_bo_migrate(bo, XE_PL_TT);
		else
			r = xe_bo_validate(bo, NULL, false);
		if (r)
			return ERR_PTR(r);
	}

	switch (bo->ttm.resource->mem_type) {
	case XE_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->ttm.ttm->pages,
					    bo->ttm.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
					      bo->ttm.resource, 0,
					      bo->ttm.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

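/* unmap_dma_buf callback: tear down the sg_table created by xe_dma_buf_map(). */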
static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);

	if (!xe_bo_is_vram(bo)) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		xe_ttm_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}

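/*
 * begin_cpu_access callback: if the CPU is going to read through the dma-buf,
 * migrate the BO to system (TT) memory first. The migration is best effort;
 * its return value is deliberately ignored.
 */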
static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads)
		return 0;

	/* Can we do interruptible lock here? */
	xe_bo_lock(bo, false);
	(void)xe_bo_migrate(bo, XE_PL_TT);
	xe_bo_unlock(bo);

	return 0;
}

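/*
 * dma_buf_ops used for all Xe exports: the dynamic attach/pin/map callbacks
 * above combined with the generic DRM prime helpers for release, mmap and
 * vmap.
 */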
static const struct dma_buf_ops xe_dmabuf_ops = {
	.attach = xe_dma_buf_attach,
	.detach = xe_dma_buf_detach,
	.pin = xe_dma_buf_pin,
	.unpin = xe_dma_buf_unpin,
	.map_dma_buf = xe_dma_buf_map,
	.unmap_dma_buf = xe_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = xe_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

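/*
 * Export a GEM object as a dma-buf. Objects private to a VM (bo->vm set) are
 * refused with -EPERM; everything else goes through the standard DRM prime
 * export with the dma_buf_ops replaced by xe_dmabuf_ops.
 */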
struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct dma_buf *buf;

	if (bo->vm)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(obj, flags);
	if (!IS_ERR(buf))
		buf->ops = &xe_dmabuf_ops;

	return buf;
}

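/*
 * Create the importer-side BO for a foreign dma-buf: an sg-type TTM object in
 * system memory that shares the dma-buf's reservation object.
 */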
static struct drm_gem_object *
xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
		    struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	int ret;

	dma_resv_lock(resv, NULL);
	bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
				    0, /* Will require 1way or 2way for vm_bind */
				    ttm_bo_type_sg, XE_BO_CREATE_SYSTEM_BIT);
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto error;
	}
	dma_resv_unlock(resv);

	return &bo->ttm.base;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

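/*
 * move_notify callback: the exporter is moving the buffer, so evict the
 * imported BO to invalidate its current mapping.
 */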
static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	XE_WARN_ON(xe_bo_evict(bo, false));
}

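/*
 * Importer-side attachment ops used when Xe imports a dma-buf from another
 * device: allow peer-to-peer and handle exporter moves via move_notify.
 */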
static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = xe_dma_buf_move_notify
};

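/*
 * KUnit support: the live dma-buf tests can override the importer attach ops
 * and force the "different devices" import path through these parameters.
 */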
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)

struct dma_buf_test_params {
	struct xe_test_priv base;
	const struct dma_buf_attach_ops *attach_ops;
	bool force_different_devices;
	u32 mem_mask;
};

#define to_dma_buf_test_params(_priv) \
	container_of(_priv, struct dma_buf_test_params, base)
#endif

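/*
 * Import a dma-buf as a GEM object. Re-importing one of our own exports on
 * the same device only takes a reference on the existing GEM object;
 * otherwise a new sg-type BO is created and dynamically attached to the
 * foreign dma-buf.
 */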
struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
					   struct dma_buf *dma_buf)
{
	XE_TEST_DECLARE(struct dma_buf_test_params *test =
			to_dma_buf_test_params
			(xe_cur_kunit_priv(XE_TEST_LIVE_DMA_BUF));)
	const struct dma_buf_attach_ops *attach_ops;
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct xe_bo *bo;

	if (dma_buf->ops == &xe_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev &&
		    !XE_TEST_ONLY(test && test->force_different_devices)) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	/*
	 * Don't publish the bo until we have a valid attachment, and a
	 * valid attachment needs the bo address. So pre-create a bo before
	 * creating the attachment and publish it afterwards.
	 */
	bo = xe_bo_alloc();
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	attach_ops = &xe_dma_buf_attach_ops;
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	if (test)
		attach_ops = test->attach_ops;
#endif

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);
	if (IS_ERR(attach)) {
		obj = ERR_CAST(attach);
		goto out_err;
	}

	/* Errors here will take care of freeing the bo. */
	obj = xe_dma_buf_init_obj(dev, bo, dma_buf);
	if (IS_ERR(obj))
		return obj;

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;

out_err:
	xe_bo_free(bo);

	return obj;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_dma_buf.c"
#endif