v6.8
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	bool make_dirty = umem->writable && dirty;
	struct scatterlist *sg;
	unsigned int i;

	if (dirty)
		ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
					   DMA_BIDIRECTIONAL, 0);

	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
		unpin_user_page_range_dirty_lock(sg_page(sg),
			DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

	sg_free_append_table(&umem->sgt_append);
}

/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that support multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if the umem requires page sizes not supported by
 * the driver to be mapped. Drivers always supporting PAGE_SIZE
 * or smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	struct scatterlist *sg;
	unsigned long va, pgoff;
	dma_addr_t mask;
	int i;

	umem->iova = va = virt;

	if (umem->is_odp) {
		unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);

		/* ODP must always be self consistent. */
		if (!(pgsz_bitmap & page_size))
			return 0;
		return page_size;
	}

	/* The best result is the smallest page size that results in the minimum
	 * number of required pages. Compute the largest page size that could
	 * work based on VA address bits that don't change.
	 */
	mask = pgsz_bitmap &
	       GENMASK(BITS_PER_LONG - 1,
		       bits_per((umem->length - 1 + virt) ^ virt));
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
		mask |= (sg_dma_address(sg) + pgoff) ^ va;
		va += sg_dma_len(sg) - pgoff;
		/* Except for the last entry, the ending iova alignment sets
		 * the maximum possible page size as the low bits of the iova
		 * must be zero when starting the next chunk.
		 */
		if (i != (umem->sgt_append.sgt.nents - 1))
			mask |= va;
		pgoff = 0;
	}

	/* The mask accumulates 1's in each position where the VA and physical
	 * address differ, thus the length of trailing 0 is the largest page
	 * size that can pass the VA through to the physical.
	 */
	if (mask)
		pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
	return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);

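/*
 * Editor's illustrative sketch, not part of the original file: how a
 * driver supporting 4K/2M/1G MR page sizes might call the helper above.
 * The bitmap value and the function name are assumptions.
 */
static inline unsigned long example_mr_page_size(struct ib_umem *umem,
						 u64 iova)
{
	/* Each set bit advertises one supported HW page size. */
	unsigned long pgsz_bitmap = SZ_4K | SZ_2M | SZ_1G;
	unsigned long pgsz;

	pgsz = ib_umem_find_best_pgsz(umem, pgsz_bitmap, iova);
	if (!pgsz)
		return 0;	/* caller must abort the registration */
	return pgsz;
}
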
/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @device: IB device to connect UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access)
{
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	unsigned long dma_attr = 0;
	struct mm_struct *mm;
	unsigned long npages;
	int pinned, ret;
	unsigned int gup_flags = FOLL_LONGTERM;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND)
		return ERR_PTR(-EOPNOTSUPP);

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->ibdev      = device;
	umem->length     = size;
	umem->address    = addr;
	/*
	 * Drivers should call ib_umem_find_best_pgsz() to set the iova
	 * correctly.
	 */
	umem->iova = addr;
	umem->writable   = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (umem->writable)
		gup_flags |= FOLL_WRITE;

	while (npages) {
		cond_resched();
		pinned = pin_user_pages_fast(cur_base,
					  min_t(unsigned long, npages,
						PAGE_SIZE /
						sizeof(struct page *)),
					  gup_flags, page_list);
		if (pinned < 0) {
			ret = pinned;
			goto umem_release;
		}

		cur_base += pinned * PAGE_SIZE;
		npages -= pinned;
		ret = sg_alloc_append_table_from_pages(
			&umem->sgt_append, page_list, pinned, 0,
			pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
			npages, GFP_KERNEL);
		if (ret) {
			unpin_user_pages_dirty_lock(page_list, pinned, 0);
			goto umem_release;
		}
	}

	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
				       DMA_BIDIRECTIONAL, dma_attr);
	if (ret)
		goto umem_release;
	goto out;

umem_release:
	__ib_umem_release(device, umem, 0);
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);

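/*
 * Editor's illustrative sketch, not part of the original file: the usual
 * pin/map pairing a driver performs when registering an MR. The function
 * name is hypothetical.
 */
static struct ib_umem *example_pin_mr(struct ib_device *device, u64 start,
				      u64 length, int access)
{
	struct ib_umem *umem;

	umem = ib_umem_get(device, start, length, access);
	if (IS_ERR(umem))
		return umem;	/* e.g. -EPERM, -EOPNOTSUPP or -ENOMEM */

	/* ... program HW translation entries from umem->sgt_append.sgt ... */

	return umem;	/* must later be paired with ib_umem_release() */
}
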
/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (!umem)
		return;
	if (umem->is_dmabuf)
		return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
	if (umem->is_odp)
		return ib_umem_odp_release(to_ib_umem_odp(umem));

	__ib_umem_release(umem->ibdev, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	mmdrop(umem->owning_mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

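/*
 * Editor's illustrative sketch, not part of the original file: teardown
 * side of the hypothetical registration above. ib_umem_release() accepts
 * NULL and dispatches dmabuf/ODP umems to their own release paths.
 */
static void example_unpin_mr(struct ib_umem *umem)
{
	/* HW must no longer reference the region at this point. */
	ib_umem_release(umem);
}
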
/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
		       __func__, offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
				 umem->sgt_append.sgt.orig_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
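
/*
 * Editor's illustrative sketch, not part of the original file: reading a
 * small user-written descriptor out of a pinned region. The struct and
 * function names are hypothetical.
 */
struct example_desc {
	__le32 magic;
	__le32 len;
};

static int example_read_desc(struct ib_umem *umem, struct example_desc *desc)
{
	/* Fails with -EINVAL if the umem is shorter than the descriptor. */
	return ib_umem_copy_from(desc, umem, 0, sizeof(*desc));
}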
v5.4
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct sg_page_iter sg_iter;
	struct page *page;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
				DMA_BIDIRECTIONAL);

	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
		page = sg_page_iter_page(&sg_iter);
		put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
	}

	sg_free_table(&umem->sg_head);
}

/* ib_umem_add_sg_table - Add N contiguous pages to scatter table
 *
 * sg: current scatterlist entry
 * page_list: array of npage struct page pointers
 * npages: number of pages in page_list
 * max_seg_sz: maximum segment size in bytes
 * nents: [out] number of entries in the scatterlist
 *
 * Return new end of scatterlist
 */
static struct scatterlist *ib_umem_add_sg_table(struct scatterlist *sg,
						struct page **page_list,
						unsigned long npages,
						unsigned int max_seg_sz,
						int *nents)
{
	unsigned long first_pfn;
	unsigned long i = 0;
	bool update_cur_sg = false;
	bool first = !sg_page(sg);

	/* Check if new page_list is contiguous with end of previous page_list.
	 * sg->length here is a multiple of PAGE_SIZE and sg->offset is 0.
	 */
	if (!first && (page_to_pfn(sg_page(sg)) + (sg->length >> PAGE_SHIFT) ==
		       page_to_pfn(page_list[0])))
		update_cur_sg = true;

	while (i != npages) {
		unsigned long len;
		struct page *first_page = page_list[i];

		first_pfn = page_to_pfn(first_page);

		/* Compute the number of contiguous pages we have starting
		 * at i
		 */
		for (len = 0; i != npages &&
			      first_pfn + len == page_to_pfn(page_list[i]) &&
			      len < (max_seg_sz >> PAGE_SHIFT);
		     len++)
			i++;

		/* Squash N contiguous pages from page_list into current sge */
		if (update_cur_sg) {
			if ((max_seg_sz - sg->length) >= (len << PAGE_SHIFT)) {
				sg_set_page(sg, sg_page(sg),
					    sg->length + (len << PAGE_SHIFT),
					    0);
				update_cur_sg = false;
				continue;
			}
			update_cur_sg = false;
		}

		/* Squash N contiguous pages into next sge or first sge */
		if (!first)
			sg = sg_next(sg);

		(*nents)++;
		sg_set_page(sg, first_page, len << PAGE_SHIFT, 0);
		first = false;
	}

	return sg;
}

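/*
 * Editor's illustrative sketch, not part of the original file: the
 * contiguity test used above, shown in isolation. Two pinned pages can
 * share one scatterlist entry only when their pfns are consecutive.
 * The helper name is hypothetical.
 */
static inline bool example_pages_mergeable(struct page *prev,
					   struct page *next)
{
	return page_to_pfn(prev) + 1 == page_to_pfn(next);
}
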
/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that support multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if the umem requires page sizes not supported by
 * the driver to be mapped. Drivers always supporting PAGE_SIZE
 * or smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	struct scatterlist *sg;
	unsigned int best_pg_bit;
	unsigned long va, pgoff;
	dma_addr_t mask;
	int i;

	/* At minimum, drivers must support PAGE_SIZE or smaller */
	if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
		return 0;

	va = virt;
	/* max page size not to exceed MR length */
	mask = roundup_pow_of_two(umem->length);
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
		mask |= (sg_dma_address(sg) + pgoff) ^ va;
		if (i && i != (umem->nmap - 1))
			/* restrict by length as well for interior SGEs */
			mask |= sg_dma_len(sg);
		va += sg_dma_len(sg) - pgoff;
		pgoff = 0;
	}
	best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);

	return BIT_ULL(best_pg_bit);
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);

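/*
 * Editor's illustrative sketch, not part of the original file: unlike the
 * later rework, this v5.4 variant cannot return 0 once the WARN_ON above
 * passes, so the result can be used directly as the MR page size. The
 * bitmap value assumes a 4K PAGE_SIZE architecture.
 */
static inline unsigned long example_mr_page_size_v54(struct ib_umem *umem,
						     u64 iova)
{
	return ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, iova);
}
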
/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @udata: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_ucontext *context;
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	struct mm_struct *mm;
	unsigned long npages;
	int ret;
	unsigned long dma_attrs = 0;
	struct scatterlist *sg;
	unsigned int gup_flags = FOLL_WRITE;

	if (!udata)
		return ERR_PTR(-EIO);

	context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
			  ->context;
	if (!context)
		return ERR_PTR(-EIO);

	if (dmasync)
		dma_attrs |= DMA_ATTR_WRITE_BARRIER;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND)
		return ERR_PTR(-EOPNOTSUPP);

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->ibdev = context->device;
	umem->length     = size;
	umem->address    = addr;
	umem->writable   = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto vma;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	sg = umem->sg_head.sgl;

	while (npages) {
		down_read(&mm->mmap_sem);
		ret = get_user_pages(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     gup_flags | FOLL_LONGTERM,
				     page_list, NULL);
		if (ret < 0) {
			up_read(&mm->mmap_sem);
			goto umem_release;
		}

		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		sg = ib_umem_add_sg_table(sg, page_list, ret,
			dma_get_max_seg_size(context->device->dma_device),
			&umem->sg_nents);

		up_read(&mm->mmap_sem);
	}

	sg_mark_end(sg);

	umem->nmap = ib_dma_map_sg_attrs(context->device,
				  umem->sg_head.sgl,
				  umem->sg_nents,
				  DMA_BIDIRECTIONAL,
				  dma_attrs);

	if (!umem->nmap) {
		ret = -ENOMEM;
		goto umem_release;
	}

	ret = 0;
	goto out;

umem_release:
	__ib_umem_release(context->device, umem, 0);
vma:
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);

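/*
 * Editor's illustrative sketch, not part of the original file: the v5.4
 * entry point takes the uverbs udata (used to find the ucontext) and a
 * dmasync flag rather than an ib_device. The function name is
 * hypothetical.
 */
static struct ib_umem *example_pin_mr_v54(struct ib_udata *udata, u64 start,
					  u64 length, int access)
{
	/* dmasync = 0: do not request DMA_ATTR_WRITE_BARRIER mappings */
	return ib_umem_get(udata, start, length, access, 0);
}
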
/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (!umem)
		return;
	if (umem->is_odp)
		return ib_umem_odp_release(to_ib_umem_odp(umem));

	__ib_umem_release(umem->ibdev, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	mmdrop(umem->owning_mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int i, n = 0;
	struct scatterlist *sg;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> PAGE_SHIFT;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);

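/*
 * Editor's illustrative sketch, not part of the original file: v5.4
 * drivers typically size their HW page lists from ib_umem_page_count().
 * The helper name is hypothetical.
 */
static inline size_t example_pbl_bytes(struct ib_umem *umem)
{
	return ib_umem_page_count(umem) * sizeof(u64);
}
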
/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);