drivers/infiniband/core/umem.c (v6.8)
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	bool make_dirty = umem->writable && dirty;
	struct scatterlist *sg;
	unsigned int i;

	if (dirty)
		ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
					   DMA_BIDIRECTIONAL, 0);

	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
		unpin_user_page_range_dirty_lock(sg_page(sg),
			DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

	sg_free_append_table(&umem->sgt_append);
}

/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that supports multiple page sizes
 * but can use only a single page size per MR.
 *
 * Returns 0 if mapping the umem would require page sizes that the
 * driver does not support. Drivers that always support PAGE_SIZE or
 * smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	struct scatterlist *sg;
	unsigned long va, pgoff;
	dma_addr_t mask;
	int i;

	umem->iova = va = virt;

	if (umem->is_odp) {
		unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);

		/* ODP must always be self consistent. */
		if (!(pgsz_bitmap & page_size))
			return 0;
		return page_size;
	}

	/* The best result is the smallest page size that results in the minimum
	 * number of required pages. Compute the largest page size that could
	 * work based on VA address bits that don't change.
	 */
	mask = pgsz_bitmap &
	       GENMASK(BITS_PER_LONG - 1,
		       bits_per((umem->length - 1 + virt) ^ virt));
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
		mask |= (sg_dma_address(sg) + pgoff) ^ va;
		va += sg_dma_len(sg) - pgoff;
		/* Except for the last entry, the ending iova alignment sets
		 * the maximum possible page size as the low bits of the iova
		 * must be zero when starting the next chunk.
		 */
		if (i != (umem->sgt_append.sgt.nents - 1))
			mask |= va;
		pgoff = 0;
	}

	/* The mask accumulates 1's in each position where the VA and physical
	 * address differ, thus the length of trailing 0 is the largest page
	 * size that can pass the VA through to the physical.
	 */
	if (mask)
		pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
	return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
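
Driver-side usage (an illustrative sketch, not part of umem.c): a typical caller passes the HW's supported page-size bitmap together with the requested IOVA, then turns the result into a page shift for its MR context. The helper below and its 4K/2M/1G bitmap are hypothetical.

#include <linux/sizes.h>
#include <linux/log2.h>
#include <rdma/ib_umem.h>

/* Hypothetical driver helper: assumes the HW can map 4K, 2M and 1G pages. */
static int example_mr_page_shift(struct ib_umem *umem, u64 iova)
{
	unsigned long pgsz;

	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G, iova);
	if (!pgsz)
		return -EINVAL;	/* the umem layout needs an unsupported size */

	return order_base_2(pgsz);	/* e.g. 12, 21 or 30 */
}
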

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @device: IB device to connect UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access)
{
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	unsigned long dma_attr = 0;
	struct mm_struct *mm;
	unsigned long npages;
	int pinned, ret;
	unsigned int gup_flags = FOLL_LONGTERM;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND)
		return ERR_PTR(-EOPNOTSUPP);

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->ibdev      = device;
	umem->length     = size;
	umem->address    = addr;
	/*
	 * Drivers should call ib_umem_find_best_pgsz() to set the iova
	 * correctly.
	 */
	umem->iova = addr;
	umem->writable   = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (umem->writable)
		gup_flags |= FOLL_WRITE;

	while (npages) {
		cond_resched();
		pinned = pin_user_pages_fast(cur_base,
					  min_t(unsigned long, npages,
						PAGE_SIZE /
						sizeof(struct page *)),
					  gup_flags, page_list);
		if (pinned < 0) {
			ret = pinned;
			goto umem_release;
		}

		cur_base += pinned * PAGE_SIZE;
		npages -= pinned;
		ret = sg_alloc_append_table_from_pages(
			&umem->sgt_append, page_list, pinned, 0,
			pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
			npages, GFP_KERNEL);
		if (ret) {
			unpin_user_pages_dirty_lock(page_list, pinned, 0);
			goto umem_release;
		}
	}

	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
				       DMA_BIDIRECTIONAL, dma_attr);
	if (ret)
		goto umem_release;
	goto out;

umem_release:
	__ib_umem_release(device, umem, 0);
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (!umem)
		return;
	if (umem->is_dmabuf)
		return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
	if (umem->is_odp)
		return ib_umem_odp_release(to_ib_umem_odp(umem));

	__ib_umem_release(umem->ibdev, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	mmdrop(umem->owning_mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);
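
A minimal sketch of the pin/release pairing (hypothetical driver code in the style of a reg_user_mr() path; example_program_hw_mr() is an assumed placeholder, not a real API):

#include <rdma/ib_umem.h>

static int example_program_hw_mr(struct ib_umem *umem);	/* hypothetical */

/* Hypothetical reg_user_mr() fragment: pin, program HW, clean up on failure. */
static struct ib_umem *example_pin_user_mr(struct ib_device *ibdev,
					   u64 start, u64 length, int access)
{
	struct ib_umem *umem;

	umem = ib_umem_get(ibdev, start, length, access);
	if (IS_ERR(umem))
		return umem;	/* -EPERM, -ENOMEM, -EINVAL, ... */

	if (example_program_hw_mr(umem) < 0) {
		ib_umem_release(umem);	/* unmaps, unpins, drops the mm */
		return ERR_PTR(-EIO);
	}
	return umem;
}
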

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
		       __func__, offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
				 umem->sgt_append.sgt.orig_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
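
A short usage sketch (hypothetical caller; the 64-byte header is illustrative):

#include <rdma/ib_umem.h>

/* Hypothetical: validate a 64-byte header userspace placed at the MR start. */
static int example_read_mr_header(struct ib_umem *umem, void *hdr)
{
	/* Returns 0 on success or a negative errno. */
	return ib_umem_copy_from(hdr, umem, 0, 64);
}
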
drivers/infiniband/core/umem.c (v5.9)
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct sg_page_iter sg_iter;
	struct page *page;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
				DMA_BIDIRECTIONAL);

	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
		page = sg_page_iter_page(&sg_iter);
		unpin_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
	}

	sg_free_table(&umem->sg_head);
}

/* ib_umem_add_sg_table - Add N contiguous pages to scatter table
 *
 * sg: current scatterlist entry
 * page_list: array of npages struct page pointers
 * npages: number of pages in page_list
 * max_seg_sz: maximum segment size in bytes
 * nents: [out] number of entries in the scatterlist
 *
 * Return new end of scatterlist
 */
static struct scatterlist *ib_umem_add_sg_table(struct scatterlist *sg,
						struct page **page_list,
						unsigned long npages,
						unsigned int max_seg_sz,
						int *nents)
{
	unsigned long first_pfn;
	unsigned long i = 0;
	bool update_cur_sg = false;
	bool first = !sg_page(sg);

	/* Check if new page_list is contiguous with end of previous page_list.
	 * sg->length here is a multiple of PAGE_SIZE and sg->offset is 0.
	 */
	if (!first && (page_to_pfn(sg_page(sg)) + (sg->length >> PAGE_SHIFT) ==
		       page_to_pfn(page_list[0])))
		update_cur_sg = true;

	while (i != npages) {
		unsigned long len;
		struct page *first_page = page_list[i];

		first_pfn = page_to_pfn(first_page);

		/* Compute the number of contiguous pages we have starting
		 * at i
		 */
		for (len = 0; i != npages &&
			      first_pfn + len == page_to_pfn(page_list[i]) &&
			      len < (max_seg_sz >> PAGE_SHIFT);
		     len++)
			i++;

		/* Squash N contiguous pages from page_list into current sge */
		if (update_cur_sg) {
			if ((max_seg_sz - sg->length) >= (len << PAGE_SHIFT)) {
				sg_set_page(sg, sg_page(sg),
					    sg->length + (len << PAGE_SHIFT),
					    0);
				update_cur_sg = false;
				continue;
			}
			update_cur_sg = false;
		}

		/* Squash N contiguous pages into next sge or first sge */
		if (!first)
			sg = sg_next(sg);

		(*nents)++;
		sg_set_page(sg, first_page, len << PAGE_SHIFT, 0);
		first = false;
	}

	return sg;
}

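
To see what the coalescing above produces, here is a worked walk-through with hypothetical pfn values:

/*
 * Hypothetical walk-through of ib_umem_add_sg_table() with
 * page_list pfns {100, 101, 102, 200} and an empty first sge:
 *
 *   sge0: pfn 100, length 3 * PAGE_SIZE  (pfns 100..102 coalesce)
 *   sge1: pfn 200, length 1 * PAGE_SIZE  (the gap starts a new sge)
 *
 * A following call whose page_list starts at pfn 201 would instead grow
 * sge1 (the update_cur_sg path), as long as max_seg_sz is not exceeded.
 */
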
/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that supports multiple page sizes
 * but can use only a single page size per MR.
 *
 * Returns 0 if mapping the umem would require page sizes that the
 * driver does not support. Drivers that always support PAGE_SIZE or
 * smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	struct scatterlist *sg;
	unsigned int best_pg_bit;
	unsigned long va, pgoff;
	dma_addr_t mask;
	int i;

	/* At minimum, drivers must support PAGE_SIZE or smaller */
	if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
		return 0;

	va = virt;
	/* max page size not to exceed MR length */
	mask = roundup_pow_of_two(umem->length);
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
		mask |= (sg_dma_address(sg) + pgoff) ^ va;
		va += sg_dma_len(sg) - pgoff;
		/* Except for the last entry, the ending iova alignment sets
		 * the maximum possible page size as the low bits of the iova
		 * must be zero when starting the next chunk.
		 */
		if (i != (umem->nmap - 1))
			mask |= va;
		pgoff = 0;
	}
	best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);

	return BIT_ULL(best_pg_bit);
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @device: IB device to connect UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access)
{
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	unsigned long dma_attr = 0;
	struct mm_struct *mm;
	unsigned long npages;
	int ret;
	struct scatterlist *sg;
	unsigned int gup_flags = FOLL_WRITE;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND)
		return ERR_PTR(-EOPNOTSUPP);

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->ibdev      = device;
	umem->length     = size;
	umem->address    = addr;
	umem->writable   = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto vma;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	sg = umem->sg_head.sgl;

	while (npages) {
		cond_resched();
		ret = pin_user_pages_fast(cur_base,
					  min_t(unsigned long, npages,
						PAGE_SIZE /
						sizeof(struct page *)),
					  gup_flags | FOLL_LONGTERM, page_list);
		if (ret < 0)
			goto umem_release;

		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		sg = ib_umem_add_sg_table(sg, page_list, ret,
			dma_get_max_seg_size(device->dma_device),
			&umem->sg_nents);
	}

	sg_mark_end(sg);

	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	umem->nmap =
		ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
				    DMA_BIDIRECTIONAL, dma_attr);

	if (!umem->nmap) {
		ret = -ENOMEM;
		goto umem_release;
	}

	ret = 0;
	goto out;

umem_release:
	__ib_umem_release(device, umem, 0);
vma:
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (!umem)
		return;
	if (umem->is_odp)
		return ib_umem_odp_release(to_ib_umem_odp(umem));

	__ib_umem_release(umem->ibdev, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	mmdrop(umem->owning_mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int i, n = 0;
	struct scatterlist *sg;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> PAGE_SHIFT;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);

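
A usage sketch for this v5.9-only helper (hypothetical caller; later kernels drop ib_umem_page_count() in favour of ib_umem_num_dma_blocks()):

#include <rdma/ib_umem.h>

/* Hypothetical: size a HW page table from the DMA-mapped page count. */
static size_t example_pt_entries(struct ib_umem *umem)
{
	return ib_umem_page_count(umem);	/* PAGE_SIZE-sized DMA pages */
}
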
/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);