drivers/infiniband/core/umem_dmabuf.c

v6.8
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "uverbs.h"

MODULE_IMPORT_NS(DMA_BUF);

int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	long ret;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (umem_dmabuf->sgt)
		goto wait_fence;

	sgt = dma_buf_map_attachment(umem_dmabuf->attach,
				     DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* modify the sg list in-place to match umem address and length */

	start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
	end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
		    PAGE_SIZE);
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (start < cur + sg_dma_len(sg) && cur < end)
			nmap++;
		if (cur <= start && start < cur + sg_dma_len(sg)) {
			unsigned long offset = start - cur;

			umem_dmabuf->first_sg = sg;
			umem_dmabuf->first_sg_offset = offset;
			sg_dma_address(sg) += offset;
			sg_dma_len(sg) -= offset;
			cur += offset;
		}
		if (cur < end && end <= cur + sg_dma_len(sg)) {
			unsigned long trim = cur + sg_dma_len(sg) - end;

			umem_dmabuf->last_sg = sg;
			umem_dmabuf->last_sg_trim = trim;
			sg_dma_len(sg) -= trim;
			break;
		}
		cur += sg_dma_len(sg);
	}

	umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
	/*
	 * Although the sg list is valid now, the content of the pages
	 * may not be up to date. Wait for the exporter to finish
	 * the migration.
	 */
	ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
				    DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ETIMEDOUT;
	return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
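
The loop above only adjusts the first and last sg entries the umem overlaps. A minimal userspace sketch of the same arithmetic, with assumed values (a umem starting 0x1800 bytes into the dma-buf, 0x2000 bytes long, against a single 64 KiB exporter sg entry; the kernel's ALIGN macros are re-derived here for power-of-two alignments):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* assumed umem: offset 0x1800 into the dma-buf, length 0x2000 */
	unsigned long address = 0x1800, length = 0x2000;
	unsigned long start = ALIGN_DOWN(address, PAGE_SIZE);     /* 0x1000 */
	unsigned long end = ALIGN(address + length, PAGE_SIZE);   /* 0x4000 */

	/* assumed sg list: one entry covering bytes 0..0x10000 */
	unsigned long cur = 0, dma_len = 0x10000;
	unsigned long offset = start - cur;        /* 0x1000 cut from the front */
	unsigned long trim = cur + dma_len - end;  /* 0xc000 cut from the tail */

	printf("mapped %#lx bytes, %lu pages\n",
	       dma_len - offset - trim,
	       (dma_len - offset - trim) / PAGE_SIZE);  /* 0x3000, 3 pages */
	return 0;
}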

void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	/* restore the original sg list */
	if (umem_dmabuf->first_sg) {
		sg_dma_address(umem_dmabuf->first_sg) -=
			umem_dmabuf->first_sg_offset;
		sg_dma_len(umem_dmabuf->first_sg) +=
			umem_dmabuf->first_sg_offset;
		umem_dmabuf->first_sg = NULL;
		umem_dmabuf->first_sg_offset = 0;
	}
	if (umem_dmabuf->last_sg) {
		sg_dma_len(umem_dmabuf->last_sg) +=
			umem_dmabuf->last_sg_trim;
		umem_dmabuf->last_sg = NULL;
		umem_dmabuf->last_sg_trim = 0;
	}

	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
				 DMA_BIDIRECTIONAL);

	umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops)
{
	struct dma_buf *dmabuf;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	unsigned long end;
	struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

	if (check_add_overflow(offset, (unsigned long)size, &end))
		return ret;

	if (unlikely(!ops || !ops->move_notify))
		return ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	if (dmabuf->size < end)
		goto out_release_dmabuf;

	umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
	if (!umem_dmabuf) {
		ret = ERR_PTR(-ENOMEM);
		goto out_release_dmabuf;
	}

	umem = &umem_dmabuf->umem;
	umem->ibdev = device;
	umem->length = size;
	umem->address = offset;
	umem->writable = ib_access_writable(access);
	umem->is_dmabuf = 1;

	if (!ib_umem_num_pages(umem))
		goto out_free_umem;

	umem_dmabuf->attach = dma_buf_dynamic_attach(
					dmabuf,
					device->dma_device,
					ops,
					umem_dmabuf);
	if (IS_ERR(umem_dmabuf->attach)) {
		ret = ERR_CAST(umem_dmabuf->attach);
		goto out_free_umem;
	}
	return umem_dmabuf;

out_free_umem:
	kfree(umem_dmabuf);

out_release_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);
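
Callers of ib_umem_dmabuf_get() must supply attach ops with a move_notify callback. A sketch of what a dynamic (non-pinned) importer might pass in, assuming a hypothetical my_invalidate_hw_mapping() that quiesces the device before the mapping goes away; dma-buf invokes move_notify with the reservation lock already held, which is what the unmap path asserts:

/* hypothetical driver-side ops for the dynamic (movable) path */
static void my_move_notify(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

	/* stop HW access to the old placement (driver-specific) */
	my_invalidate_hw_mapping(umem_dmabuf);
	/* then drop the now-stale sg list */
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
}

static const struct dma_buf_attach_ops my_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = my_move_notify,
};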

static void
ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

	ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
			       "Invalidate callback should not be called when memory is pinned\n");
}

static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
	.allow_peer2peer = true,
	.move_notify = ib_umem_dmabuf_unsupported_move_notify,
};

struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
					 &ib_umem_dmabuf_attach_pinned_ops);
	if (IS_ERR(umem_dmabuf))
		return umem_dmabuf;

	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = dma_buf_pin(umem_dmabuf->attach);
	if (err)
		goto err_release;
	umem_dmabuf->pinned = 1;

	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	if (err)
		goto err_unpin;
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

	return umem_dmabuf;

err_unpin:
	dma_buf_unpin(umem_dmabuf->attach);
err_release:
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
	ib_umem_release(&umem_dmabuf->umem);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);
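
A usage sketch for the pinned variant, modeled on how a driver's MR registration path could call it; my_dev, my_program_mtt, and the function itself are illustrative, not from the tree. Teardown goes through ib_umem_release(&umem_dmabuf->umem), which routes dmabuf-backed umems to ib_umem_dmabuf_release() below:

/* hypothetical registration path built on the pinned API */
static int my_reg_dmabuf_mr(struct my_dev *dev, unsigned long offset,
			    size_t length, int fd, int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;

	umem_dmabuf = ib_umem_dmabuf_get_pinned(&dev->ib_dev, offset,
						length, fd, access);
	if (IS_ERR(umem_dmabuf))
		return PTR_ERR(umem_dmabuf);

	/* the trimmed sg list is stable while pinned; program HW from it */
	my_program_mtt(dev, &umem_dmabuf->umem);
	return 0;
}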

void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	dma_resv_lock(dmabuf->resv, NULL);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	if (umem_dmabuf->pinned)
		dma_buf_unpin(umem_dmabuf->attach);
	dma_resv_unlock(dmabuf->resv);

	dma_buf_detach(dmabuf, umem_dmabuf->attach);
	dma_buf_put(dmabuf);
	kfree(umem_dmabuf);
}
v6.13.7
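
Relative to v6.8 above, this version quotes the namespace string in MODULE_IMPORT_NS("DMA_BUF"), adds a revoked flag that makes ib_umem_dmabuf_map_pages() fail with -EINVAL after revocation, splits attachment into ib_umem_dmabuf_get_with_dma_device() so a caller can attach with a DMA device other than device->dma_device, adds the matching ib_umem_dmabuf_get_pinned_with_dma_device(), and factors the unmap/unpin teardown into ib_umem_dmabuf_revoke(), which ib_umem_dmabuf_release() now calls.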
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "uverbs.h"

MODULE_IMPORT_NS("DMA_BUF");

int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	long ret;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (umem_dmabuf->revoked)
		return -EINVAL;

	if (umem_dmabuf->sgt)
		goto wait_fence;

	sgt = dma_buf_map_attachment(umem_dmabuf->attach,
				     DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* modify the sg list in-place to match umem address and length */

	start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
	end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
		    PAGE_SIZE);
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (start < cur + sg_dma_len(sg) && cur < end)
			nmap++;
		if (cur <= start && start < cur + sg_dma_len(sg)) {
			unsigned long offset = start - cur;

			umem_dmabuf->first_sg = sg;
			umem_dmabuf->first_sg_offset = offset;
			sg_dma_address(sg) += offset;
			sg_dma_len(sg) -= offset;
			cur += offset;
		}
		if (cur < end && end <= cur + sg_dma_len(sg)) {
			unsigned long trim = cur + sg_dma_len(sg) - end;

			umem_dmabuf->last_sg = sg;
			umem_dmabuf->last_sg_trim = trim;
			sg_dma_len(sg) -= trim;
			break;
		}
		cur += sg_dma_len(sg);
	}

	umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
	/*
	 * Although the sg list is valid now, the content of the pages
	 * may not be up to date. Wait for the exporter to finish
	 * the migration.
	 */
	ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
				    DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ETIMEDOUT;
	return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);

void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	/* restore the original sg list */
	if (umem_dmabuf->first_sg) {
		sg_dma_address(umem_dmabuf->first_sg) -=
			umem_dmabuf->first_sg_offset;
		sg_dma_len(umem_dmabuf->first_sg) +=
			umem_dmabuf->first_sg_offset;
		umem_dmabuf->first_sg = NULL;
		umem_dmabuf->first_sg_offset = 0;
	}
	if (umem_dmabuf->last_sg) {
		sg_dma_len(umem_dmabuf->last_sg) +=
			umem_dmabuf->last_sg_trim;
		umem_dmabuf->last_sg = NULL;
		umem_dmabuf->last_sg_trim = 0;
	}

	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
				 DMA_BIDIRECTIONAL);

	umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

static struct ib_umem_dmabuf *
ib_umem_dmabuf_get_with_dma_device(struct ib_device *device,
				   struct device *dma_device,
				   unsigned long offset, size_t size,
				   int fd, int access,
				   const struct dma_buf_attach_ops *ops)
{
	struct dma_buf *dmabuf;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	unsigned long end;
	struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

	if (check_add_overflow(offset, (unsigned long)size, &end))
		return ret;

	if (unlikely(!ops || !ops->move_notify))
		return ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	if (dmabuf->size < end)
		goto out_release_dmabuf;

	umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
	if (!umem_dmabuf) {
		ret = ERR_PTR(-ENOMEM);
		goto out_release_dmabuf;
	}

	umem = &umem_dmabuf->umem;
	umem->ibdev = device;
	umem->length = size;
	umem->address = offset;
	umem->writable = ib_access_writable(access);
	umem->is_dmabuf = 1;

	if (!ib_umem_num_pages(umem))
		goto out_free_umem;

	umem_dmabuf->attach = dma_buf_dynamic_attach(
					dmabuf,
					dma_device,
					ops,
					umem_dmabuf);
	if (IS_ERR(umem_dmabuf->attach)) {
		ret = ERR_CAST(umem_dmabuf->attach);
		goto out_free_umem;
	}
	return umem_dmabuf;

out_free_umem:
	kfree(umem_dmabuf);

out_release_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ib_umem_dmabuf_get_with_dma_device(device, device->dma_device,
						  offset, size, fd, access, ops);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);

static void
ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

	ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
			       "Invalidate callback should not be called when memory is pinned\n");
}

static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
	.allow_peer2peer = true,
	.move_notify = ib_umem_dmabuf_unsupported_move_notify,
};

struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	umem_dmabuf = ib_umem_dmabuf_get_with_dma_device(device, dma_device, offset,
							 size, fd, access,
							 &ib_umem_dmabuf_attach_pinned_ops);
	if (IS_ERR(umem_dmabuf))
		return umem_dmabuf;

	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = dma_buf_pin(umem_dmabuf->attach);
	if (err)
		goto err_release;
	umem_dmabuf->pinned = 1;

	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	if (err)
		goto err_unpin;
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

	return umem_dmabuf;

err_unpin:
	dma_buf_unpin(umem_dmabuf->attach);
err_release:
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
	ib_umem_release(&umem_dmabuf->umem);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned_with_dma_device);

struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access)
{
	return ib_umem_dmabuf_get_pinned_with_dma_device(device, device->dma_device,
							 offset, size, fd, access);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);

void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	dma_resv_lock(dmabuf->resv, NULL);
	if (umem_dmabuf->revoked)
		goto end;
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	if (umem_dmabuf->pinned) {
		dma_buf_unpin(umem_dmabuf->attach);
		umem_dmabuf->pinned = 0;
	}
	umem_dmabuf->revoked = 1;
end:
	dma_resv_unlock(dmabuf->resv);
}
EXPORT_SYMBOL(ib_umem_dmabuf_revoke);
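
A sketch of how the revoke entry point might be used, assuming a hypothetical device-removal path that must cut off DMA while userspace still holds the memory region; revoke is idempotent, and a later ib_umem_dmabuf_release() will skip the already-done unmap/unpin:

/* hypothetical disassociation path: stop DMA now, free later */
static void my_disassociate_mr(struct my_mr *mr)
{
	ib_umem_dmabuf_revoke(mr->umem_dmabuf);
	/* subsequent ib_umem_dmabuf_map_pages() calls return -EINVAL */
}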

void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	ib_umem_dmabuf_revoke(umem_dmabuf);

	dma_buf_detach(dmabuf, umem_dmabuf->attach);
	dma_buf_put(dmabuf);
	kfree(umem_dmabuf);
}