v6.8
  1/*
  2 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 */
 32
 33#include <linux/types.h>
 34#include <linux/sched.h>
 35#include <linux/sched/mm.h>
 36#include <linux/sched/task.h>
 37#include <linux/pid.h>
 38#include <linux/slab.h>
 39#include <linux/export.h>
 40#include <linux/vmalloc.h>
 41#include <linux/hugetlb.h>
 42#include <linux/interval_tree.h>
 43#include <linux/hmm.h>
 44#include <linux/pagemap.h>
 45
 46#include <rdma/ib_umem_odp.h>
 47
 48#include "uverbs.h"
 49
 50static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
 51				   const struct mmu_interval_notifier_ops *ops)
 52{
 53	int ret;
 54
 55	umem_odp->umem.is_odp = 1;
 56	mutex_init(&umem_odp->umem_mutex);
 57
 58	if (!umem_odp->is_implicit_odp) {
 59		size_t page_size = 1UL << umem_odp->page_shift;
 60		unsigned long start;
 61		unsigned long end;
 62		size_t ndmas, npfns;
 63
 64		start = ALIGN_DOWN(umem_odp->umem.address, page_size);
 65		if (check_add_overflow(umem_odp->umem.address,
 66				       (unsigned long)umem_odp->umem.length,
 67				       &end))
 68			return -EOVERFLOW;
 69		end = ALIGN(end, page_size);
 70		if (unlikely(end < page_size))
 71			return -EOVERFLOW;
 72
 73		ndmas = (end - start) >> umem_odp->page_shift;
 74		if (!ndmas)
 75			return -EINVAL;
 76
 77		npfns = (end - start) >> PAGE_SHIFT;
 78		umem_odp->pfn_list = kvcalloc(
 79			npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
 80		if (!umem_odp->pfn_list)
 81			return -ENOMEM;
 82
 83		umem_odp->dma_list = kvcalloc(
 84			ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
 85		if (!umem_odp->dma_list) {
 86			ret = -ENOMEM;
 87			goto out_pfn_list;
 88		}
 89
 90		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
 91						   umem_odp->umem.owning_mm,
 92						   start, end - start, ops);
 93		if (ret)
 94			goto out_dma_list;
 95	}
 96
 97	return 0;
 98
 99out_dma_list:
100	kvfree(umem_odp->dma_list);
101out_pfn_list:
102	kvfree(umem_odp->pfn_list);
103	return ret;
104}
105
106/**
107 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
108 *
109 * Implicit ODP umems do not have a VA range and do not have any page lists.
110 * They exist only to hold the per_mm reference to help the driver create
111 * child umems.
112 *
113 * @device: IB device to create UMEM
114 * @access: ib_reg_mr access flags
115 */
116struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
117					       int access)
118{
119	struct ib_umem *umem;
120	struct ib_umem_odp *umem_odp;
121	int ret;
122
123	if (access & IB_ACCESS_HUGETLB)
124		return ERR_PTR(-EINVAL);
125
126	umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
127	if (!umem_odp)
128		return ERR_PTR(-ENOMEM);
129	umem = &umem_odp->umem;
130	umem->ibdev = device;
131	umem->writable = ib_access_writable(access);
132	umem->owning_mm = current->mm;
133	umem_odp->is_implicit_odp = 1;
134	umem_odp->page_shift = PAGE_SHIFT;
135
136	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
137	ret = ib_init_umem_odp(umem_odp, NULL);
138	if (ret) {
139		put_pid(umem_odp->tgid);
140		kfree(umem_odp);
141		return ERR_PTR(ret);
142	}
143	return umem_odp;
144}
145EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
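/*
 * Illustrative sketch (not part of this file): a driver's implicit-ODP
 * registration path might create the parent umem like this. The example_*
 * name is hypothetical; the error handling mirrors what the function above
 * can return (e.g. -EINVAL for IB_ACCESS_HUGETLB).
 */
static struct ib_umem_odp *example_create_implicit_parent(struct ib_device *ibdev,
							   int access_flags)
{
	struct ib_umem_odp *root;

	root = ib_umem_odp_alloc_implicit(ibdev, access_flags);
	if (IS_ERR(root))
		return root;

	/* child umems are created later, on demand, under this root */
	return root;
}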
146
147/**
148 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
149 *                           parent ODP umem
150 *
151 * @root: The parent umem enclosing the child. This must be allocated using
152 *        ib_umem_odp_alloc_implicit()
153 * @addr: The starting userspace VA
154 * @size: The length of the userspace VA
155 * @ops: MMU interval ops, currently only @invalidate
156 */
157struct ib_umem_odp *
158ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
159			size_t size,
160			const struct mmu_interval_notifier_ops *ops)
161{
162	/*
163	 * Caller must ensure that root cannot be freed during the call to
164	 * ib_umem_odp_alloc_child().
165	 */
166	struct ib_umem_odp *odp_data;
167	struct ib_umem *umem;
168	int ret;
169
170	if (WARN_ON(!root->is_implicit_odp))
171		return ERR_PTR(-EINVAL);
172
173	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
174	if (!odp_data)
175		return ERR_PTR(-ENOMEM);
176	umem = &odp_data->umem;
177	umem->ibdev = root->umem.ibdev;
178	umem->length     = size;
179	umem->address    = addr;
180	umem->writable   = root->umem.writable;
181	umem->owning_mm  = root->umem.owning_mm;
182	odp_data->page_shift = PAGE_SHIFT;
183	odp_data->notifier.ops = ops;
184
185	/*
186	 * An mmget must be held when registering a notifier; the owning_mm only
187	 * has an mmgrab reference at this point.
188	 */
189	if (!mmget_not_zero(umem->owning_mm)) {
190		ret = -EFAULT;
191		goto out_free;
192	}
193
194	odp_data->tgid = get_pid(root->tgid);
195	ret = ib_init_umem_odp(odp_data, ops);
196	if (ret)
197		goto out_tgid;
198	mmput(umem->owning_mm);
199	return odp_data;
200
201out_tgid:
202	put_pid(odp_data->tgid);
203	mmput(umem->owning_mm);
204out_free:
205	kfree(odp_data);
206	return ERR_PTR(ret);
207}
208EXPORT_SYMBOL(ib_umem_odp_alloc_child);
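/*
 * Illustrative sketch (not part of this file): on a page fault inside an
 * implicit MR a driver typically carves out a child umem covering the
 * faulting range. The example_* name, the 2MB granularity and the
 * notifier_ops argument (the driver's invalidate callback) are assumptions
 * for the example.
 */
static struct ib_umem_odp *
example_get_fault_child(struct ib_umem_odp *root, unsigned long fault_addr,
			const struct mmu_interval_notifier_ops *notifier_ops)
{
	unsigned long start = ALIGN_DOWN(fault_addr, SZ_2M);

	return ib_umem_odp_alloc_child(root, start, SZ_2M, notifier_ops);
}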
209
210/**
211 * ib_umem_odp_get - Create a umem_odp for a userspace va
212 *
213 * @device: IB device struct to get UMEM
214 * @addr: userspace virtual address to start at
215 * @size: length of region to pin
216 * @access: IB_ACCESS_xxx flags for memory being pinned
217 * @ops: MMU interval ops, currently only @invalidate
218 *
219 * The driver should use this when the access flags indicate ODP memory. It
220 * avoids pinning; instead, it stores the mm for future page fault handling
221 * in conjunction with MMU notifiers.
222 */
223struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
224				    unsigned long addr, size_t size, int access,
225				    const struct mmu_interval_notifier_ops *ops)
226{
227	struct ib_umem_odp *umem_odp;
228	int ret;
229
230	if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
231		return ERR_PTR(-EINVAL);
232
233	umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
234	if (!umem_odp)
235		return ERR_PTR(-ENOMEM);
236
237	umem_odp->umem.ibdev = device;
238	umem_odp->umem.length = size;
239	umem_odp->umem.address = addr;
240	umem_odp->umem.writable = ib_access_writable(access);
241	umem_odp->umem.owning_mm = current->mm;
242	umem_odp->notifier.ops = ops;
243
244	umem_odp->page_shift = PAGE_SHIFT;
245#ifdef CONFIG_HUGETLB_PAGE
246	if (access & IB_ACCESS_HUGETLB)
247		umem_odp->page_shift = HPAGE_SHIFT;
248#endif
249
250	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
251	ret = ib_init_umem_odp(umem_odp, ops);
252	if (ret)
253		goto err_put_pid;
254	return umem_odp;
255
256err_put_pid:
257	put_pid(umem_odp->tgid);
258	kfree(umem_odp);
259	return ERR_PTR(ret);
260}
261EXPORT_SYMBOL(ib_umem_odp_get);
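/*
 * Illustrative sketch (not part of this file): a reg_user_mr() path would
 * pick ib_umem_odp_get() instead of ib_umem_get() when userspace requested
 * on-demand paging. The example_* name and notifier_ops are assumptions.
 */
static struct ib_umem_odp *
example_reg_odp_umem(struct ib_device *ibdev, u64 start, u64 length, int access,
		     const struct mmu_interval_notifier_ops *notifier_ops)
{
	if (!(access & IB_ACCESS_ON_DEMAND))
		return ERR_PTR(-EINVAL);	/* caller should use ib_umem_get() */

	return ib_umem_odp_get(ibdev, start, length, access, notifier_ops);
}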
262
263void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
264{
265	/*
266	 * Ensure that no more pages are mapped in the umem.
267	 *
268	 * It is the driver's responsibility to ensure, before calling us,
269	 * that the hardware will not attempt to access the MR any more.
270	 */
271	if (!umem_odp->is_implicit_odp) {
272		mutex_lock(&umem_odp->umem_mutex);
273		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
274					    ib_umem_end(umem_odp));
275		mutex_unlock(&umem_odp->umem_mutex);
276		mmu_interval_notifier_remove(&umem_odp->notifier);
277		kvfree(umem_odp->dma_list);
278		kvfree(umem_odp->pfn_list);
279	}
280	put_pid(umem_odp->tgid);
281	kfree(umem_odp);
282}
283EXPORT_SYMBOL(ib_umem_odp_release);
284
285/*
286 * Map for DMA and insert a single page into the on-demand paging page tables.
287 *
288 * @umem: the umem to insert the page to.
289 * @dma_index: index in the umem to add the dma to.
290 * @page: the page struct to map and add.
291 * @access_mask: access permissions needed for this page.
292 *
293 * The function returns -EFAULT if the DMA mapping operation fails.
294 *
295 */
296static int ib_umem_odp_map_dma_single_page(
297		struct ib_umem_odp *umem_odp,
298		unsigned int dma_index,
299		struct page *page,
300		u64 access_mask)
301{
302	struct ib_device *dev = umem_odp->umem.ibdev;
303	dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
304
305	if (*dma_addr) {
306		/*
307		 * If the page is already DMA mapped it means it went through
308		 * a non-invalidating transition, like read-only to writable.
309		 * Resync the flags.
310		 */
311		*dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
312		return 0;
313	}
314
315	*dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
316				    DMA_BIDIRECTIONAL);
317	if (ib_dma_mapping_error(dev, *dma_addr)) {
318		*dma_addr = 0;
319		return -EFAULT;
320	}
321	umem_odp->npages++;
322	*dma_addr |= access_mask;
323	return 0;
324}
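/*
 * Illustrative sketch (not part of this file): every dma_list entry packs the
 * page-aligned DMA address together with the ODP_READ/WRITE_ALLOWED access
 * bits, which is why the helper above masks with ODP_DMA_ADDR_MASK when it
 * only wants to update the flags. Decoding an entry looks like this; the
 * example_* names are hypothetical.
 */
static inline dma_addr_t example_entry_to_dma_addr(dma_addr_t entry)
{
	return entry & ODP_DMA_ADDR_MASK;	/* strip the access bits */
}

static inline bool example_entry_is_writable(dma_addr_t entry)
{
	return !!(entry & ODP_WRITE_ALLOWED_BIT);
}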
325
326/**
327 * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
328 *
329 * Maps the range passed in the argument to DMA addresses.
330 * The DMA addresses of the mapped pages are updated in umem_odp->dma_list.
331 * Upon success the ODP MR will be locked to let caller complete its device
332 * page table update.
333 *
334 * Returns the number of pages mapped on success, or a negative error code
335 * on failure.
336 * @umem_odp: the umem to map and pin
337 * @user_virt: the address from which we need to map.
338 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
339 *        bigger due to alignment, and may also be smaller in case of an error
340 *        pinning or mapping a page. The actual number of pages mapped is
341 *        returned in the return value.
342 * @access_mask: bit mask of the requested access permissions for the given
343 *               range.
344 * @fault: is faulting required for the given range
345 */
346int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
347				 u64 bcnt, u64 access_mask, bool fault)
348			__acquires(&umem_odp->umem_mutex)
349{
350	struct task_struct *owning_process  = NULL;
351	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
352	int pfn_index, dma_index, ret = 0, start_idx;
353	unsigned int page_shift, hmm_order, pfn_start_idx;
354	unsigned long num_pfns, current_seq;
355	struct hmm_range range = {};
356	unsigned long timeout;
357
358	if (access_mask == 0)
359		return -EINVAL;
360
361	if (user_virt < ib_umem_start(umem_odp) ||
362	    user_virt + bcnt > ib_umem_end(umem_odp))
363		return -EFAULT;
364
365	page_shift = umem_odp->page_shift;
366
367	/*
368	 * owning_process is allowed to be NULL; this means the mm somehow
369	 * outlived the originating process. Presumably mmget_not_zero()
370	 * will fail in this case.
371	 */
372	owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
373	if (!owning_process || !mmget_not_zero(owning_mm)) {
374		ret = -EINVAL;
375		goto out_put_task;
376	}
377
378	range.notifier = &umem_odp->notifier;
379	range.start = ALIGN_DOWN(user_virt, 1UL << page_shift);
380	range.end = ALIGN(user_virt + bcnt, 1UL << page_shift);
381	pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
382	num_pfns = (range.end - range.start) >> PAGE_SHIFT;
383	if (fault) {
384		range.default_flags = HMM_PFN_REQ_FAULT;
385
386		if (access_mask & ODP_WRITE_ALLOWED_BIT)
387			range.default_flags |= HMM_PFN_REQ_WRITE;
388	}
389
390	range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
391	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
392
393retry:
394	current_seq = range.notifier_seq =
395		mmu_interval_read_begin(&umem_odp->notifier);
396
397	mmap_read_lock(owning_mm);
398	ret = hmm_range_fault(&range);
399	mmap_read_unlock(owning_mm);
400	if (unlikely(ret)) {
401		if (ret == -EBUSY && !time_after(jiffies, timeout))
402			goto retry;
403		goto out_put_mm;
404	}
405
406	start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift;
407	dma_index = start_idx;
408
409	mutex_lock(&umem_odp->umem_mutex);
410	if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) {
411		mutex_unlock(&umem_odp->umem_mutex);
412		goto retry;
413	}
414
415	for (pfn_index = 0; pfn_index < num_pfns;
416		pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
417
418		if (fault) {
419			/*
420			 * Since we asked for hmm_range_fault() to populate
421			 * pages it shouldn't return an error entry on success.
422			 */
423			WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
424			WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
425		} else {
426			if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
427				WARN_ON(umem_odp->dma_list[dma_index]);
428				continue;
429			}
430			access_mask = ODP_READ_ALLOWED_BIT;
431			if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
432				access_mask |= ODP_WRITE_ALLOWED_BIT;
433		}
434
435		hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
436		/* If a hugepage was detected and ODP wasn't set for it, the umem
437		 * page_shift will be used; the opposite case is an error.
438		 */
439		if (hmm_order + PAGE_SHIFT < page_shift) {
440			ret = -EINVAL;
441			ibdev_dbg(umem_odp->umem.ibdev,
442				  "%s: unexpected hmm_order %u, page_shift %u\n",
443				  __func__, hmm_order, page_shift);
444			break;
445		}
446
447		ret = ib_umem_odp_map_dma_single_page(
448				umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
449				access_mask);
450		if (ret < 0) {
451			ibdev_dbg(umem_odp->umem.ibdev,
452				  "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
453			break;
454		}
455	}
456	/* upon success the lock stays held for the caller */
457	if (!ret)
458		ret = dma_index - start_idx;
459	else
460		mutex_unlock(&umem_odp->umem_mutex);
461
462out_put_mm:
463	mmput_async(owning_mm);
464out_put_task:
465	if (owning_process)
466		put_task_struct(owning_process);
467	return ret;
468}
469EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
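/*
 * Illustrative sketch (not part of this file): on success the umem_mutex is
 * left held, so a fault handler is expected to update the device page table
 * under that lock and then release it. The example_* name and the device
 * page-table step are placeholders for driver-specific code.
 */
static int example_handle_fault(struct ib_umem_odp *umem_odp, u64 va, u64 len,
				u64 access_mask)
{
	int npages;

	npages = ib_umem_odp_map_dma_and_lock(umem_odp, va, len, access_mask,
					      true);
	if (npages < 0)
		return npages;

	/* ... write umem_odp->dma_list entries into the device MTT here ... */

	mutex_unlock(&umem_odp->umem_mutex);
	return npages;
}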
470
471void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
472				 u64 bound)
473{
474	dma_addr_t dma_addr;
475	dma_addr_t dma;
476	int idx;
477	u64 addr;
478	struct ib_device *dev = umem_odp->umem.ibdev;
479
480	lockdep_assert_held(&umem_odp->umem_mutex);
481
482	virt = max_t(u64, virt, ib_umem_start(umem_odp));
483	bound = min_t(u64, bound, ib_umem_end(umem_odp));
484	for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
485		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
486		dma = umem_odp->dma_list[idx];
487
488		/* Only unmap entries that actually hold a valid DMA address */
489		if (dma) {
490			unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
491			struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
492
493			dma_addr = dma & ODP_DMA_ADDR_MASK;
494			ib_dma_unmap_page(dev, dma_addr,
495					  BIT(umem_odp->page_shift),
496					  DMA_BIDIRECTIONAL);
497			if (dma & ODP_WRITE_ALLOWED_BIT) {
498				struct page *head_page = compound_head(page);
499				/*
500				 * set_page_dirty prefers being called with
501				 * the page lock. However, MMU notifiers are
502				 * called sometimes with and sometimes without
503				 * the lock. We rely on the umem_mutex instead
504				 * to prevent other mmu notifiers from
505				 * continuing and allowing the page mapping to
506				 * be removed.
507				 */
508				set_page_dirty(head_page);
509			}
510			umem_odp->dma_list[idx] = 0;
511			umem_odp->npages--;
512		}
513	}
514}
515EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
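/*
 * Illustrative sketch (not part of this file): ib_umem_odp_unmap_dma_pages()
 * is normally called from the driver's mmu_interval_notifier invalidate
 * callback, which must take umem_mutex itself (the lockdep_assert_held()
 * above relies on that). The device-side zap is a placeholder.
 */
static bool example_invalidate(struct mmu_interval_notifier *mni,
			       const struct mmu_notifier_range *range,
			       unsigned long cur_seq)
{
	struct ib_umem_odp *umem_odp =
		container_of(mni, struct ib_umem_odp, notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&umem_odp->umem_mutex);
	mmu_interval_set_seq(mni, cur_seq);
	/* ... zap the device page table for [range->start, range->end) ... */
	ib_umem_odp_unmap_dma_pages(umem_odp, range->start, range->end);
	mutex_unlock(&umem_odp->umem_mutex);
	return true;
}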
v4.17
  1/*
  2 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 */
 32
 33#include <linux/types.h>
 34#include <linux/sched.h>
 35#include <linux/sched/mm.h>
 36#include <linux/sched/task.h>
 37#include <linux/pid.h>
 38#include <linux/slab.h>
 39#include <linux/export.h>
 40#include <linux/vmalloc.h>
 41#include <linux/hugetlb.h>
 42#include <linux/interval_tree_generic.h>
 43
 44#include <rdma/ib_verbs.h>
 45#include <rdma/ib_umem.h>
 46#include <rdma/ib_umem_odp.h>
 47
 48/*
 49 * The ib_umem list keeps track of memory regions for which the HW
 50 * device requests to receive a notification when the related memory
 51 * mapping is changed.
 52 *
 53 * ib_umem_lock protects the list.
 54 */
 55
 56static u64 node_start(struct umem_odp_node *n)
 57{
 58	struct ib_umem_odp *umem_odp =
 59			container_of(n, struct ib_umem_odp, interval_tree);
 60
 61	return ib_umem_start(umem_odp->umem);
 62}
 63
 64/* Note that the representation of the intervals in the interval tree
 65 * considers the ending point as contained in the interval, while the
 66 * function ib_umem_end returns the first address which is not contained
 67 * in the umem.
 68 */
 69static u64 node_last(struct umem_odp_node *n)
 70{
 71	struct ib_umem_odp *umem_odp =
 72			container_of(n, struct ib_umem_odp, interval_tree);
 73
 74	return ib_umem_end(umem_odp->umem) - 1;
 75}
 76
 77INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
 78		     node_start, node_last, static, rbt_ib_umem)
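/*
 * Illustrative sketch (not part of this file): INTERVAL_TREE_DEFINE() above
 * generates rbt_ib_umem_{insert,remove,iter_first,iter_next}(). Because
 * node_last() is inclusive, a lookup over [addr, addr + length) passes
 * "addr + length - 1" as the last point, as in this hypothetical helper
 * (and in rbt_ib_umem_lookup() at the end of this file).
 */
static struct ib_umem_odp *example_find_first_overlap(struct rb_root_cached *root,
						       u64 addr, u64 length)
{
	struct umem_odp_node *node;

	node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
	if (!node)
		return NULL;
	return container_of(node, struct ib_umem_odp, interval_tree);
}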
 79
 80static void ib_umem_notifier_start_account(struct ib_umem *item)
 81{
 82	mutex_lock(&item->odp_data->umem_mutex);
 83
 84	/* Only update private counters for this umem if it has them.
 85	 * Otherwise skip it. All page faults will be delayed for this umem. */
 86	if (item->odp_data->mn_counters_active) {
 87		int notifiers_count = item->odp_data->notifiers_count++;
 88
 89		if (notifiers_count == 0)
 90			/* Initialize the completion object for waiting on
 91			 * notifiers. Since notifier_count is zero, no one
 92			 * should be waiting right now. */
 93			reinit_completion(&item->odp_data->notifier_completion);
 94	}
 95	mutex_unlock(&item->odp_data->umem_mutex);
 96}
 97
 98static void ib_umem_notifier_end_account(struct ib_umem *item)
 99{
100	mutex_lock(&item->odp_data->umem_mutex);
101
102	/* Only update private counters for this umem if it has them.
103	 * Otherwise skip it. All page faults will be delayed for this umem. */
104	if (item->odp_data->mn_counters_active) {
105		/*
106		 * This sequence increase will notify the QP page fault that
107		 * the page that is going to be mapped in the spte could have
108		 * been freed.
109		 */
110		++item->odp_data->notifiers_seq;
111		if (--item->odp_data->notifiers_count == 0)
112			complete_all(&item->odp_data->notifier_completion);
113	}
114	mutex_unlock(&item->odp_data->umem_mutex);
115}
116
117/* Account for a new mmu notifier in an ib_ucontext. */
118static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
119{
120	atomic_inc(&context->notifier_count);
121}
122
123/* Account for a terminating mmu notifier in an ib_ucontext.
124 *
125 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
126 * the function takes the semaphore itself. */
127static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
128{
129	int zero_notifiers = atomic_dec_and_test(&context->notifier_count);
130
131	if (zero_notifiers &&
132	    !list_empty(&context->no_private_counters)) {
133		/* No currently running mmu notifiers. Now is the chance to
134		 * add private accounting to all previously added umems. */
135		struct ib_umem_odp *odp_data, *next;
136
137		/* Prevent concurrent mmu notifiers from working on the
138		 * no_private_counters list. */
139		down_write(&context->umem_rwsem);
140
141		/* Read the notifier_count again, with the umem_rwsem
142		 * semaphore taken for write. */
143		if (!atomic_read(&context->notifier_count)) {
144			list_for_each_entry_safe(odp_data, next,
145						 &context->no_private_counters,
146						 no_private_counters) {
147				mutex_lock(&odp_data->umem_mutex);
148				odp_data->mn_counters_active = true;
149				list_del(&odp_data->no_private_counters);
150				complete_all(&odp_data->notifier_completion);
151				mutex_unlock(&odp_data->umem_mutex);
152			}
153		}
154
155		up_write(&context->umem_rwsem);
156	}
157}
158
159static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
160					       u64 end, void *cookie) {
161	/*
162	 * Increase the number of notifiers running, to
163	 * prevent any further fault handling on this MR.
164	 */
165	ib_umem_notifier_start_account(item);
166	item->odp_data->dying = 1;
167	/* Make sure that the fact the umem is dying is out before we release
168	 * all pending page faults. */
169	smp_wmb();
170	complete_all(&item->odp_data->notifier_completion);
171	item->context->invalidate_range(item, ib_umem_start(item),
172					ib_umem_end(item));
173	return 0;
174}
175
176static void ib_umem_notifier_release(struct mmu_notifier *mn,
177				     struct mm_struct *mm)
178{
179	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
180
181	if (!context->invalidate_range)
182		return;
183
184	ib_ucontext_notifier_start_account(context);
185	down_read(&context->umem_rwsem);
186	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
187				      ULLONG_MAX,
188				      ib_umem_notifier_release_trampoline,
189				      NULL);
190	up_read(&context->umem_rwsem);
191}
192
193static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
194				      u64 end, void *cookie)
195{
196	ib_umem_notifier_start_account(item);
197	item->context->invalidate_range(item, start, start + PAGE_SIZE);
198	ib_umem_notifier_end_account(item);
199	return 0;
200}
201
202static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
203					     u64 end, void *cookie)
204{
205	ib_umem_notifier_start_account(item);
206	item->context->invalidate_range(item, start, end);
207	return 0;
208}
209
210static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
211						    struct mm_struct *mm,
212						    unsigned long start,
213						    unsigned long end)
214{
215	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
216
217	if (!context->invalidate_range)
218		return;
219
220	ib_ucontext_notifier_start_account(context);
221	down_read(&context->umem_rwsem);
222	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
223				      end,
224				      invalidate_range_start_trampoline, NULL);
225	up_read(&context->umem_rwsem);
226}
227
228static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
229					   u64 end, void *cookie)
230{
231	ib_umem_notifier_end_account(item);
232	return 0;
233}
234
235static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
236						  struct mm_struct *mm,
237						  unsigned long start,
238						  unsigned long end)
239{
240	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
241
242	if (!context->invalidate_range)
243		return;
244
245	down_read(&context->umem_rwsem);
246	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
247				      end,
248				      invalidate_range_end_trampoline, NULL);
249	up_read(&context->umem_rwsem);
250	ib_ucontext_notifier_end_account(context);
251}
252
253static const struct mmu_notifier_ops ib_umem_notifiers = {
254	.release                    = ib_umem_notifier_release,
255	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
256	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
257};
258
259struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
260				  unsigned long addr,
261				  size_t size)
262{
263	struct ib_umem *umem;
264	struct ib_umem_odp *odp_data;
265	int pages = size >> PAGE_SHIFT;
266	int ret;
267
268	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
269	if (!umem)
270		return ERR_PTR(-ENOMEM);
271
272	umem->context    = context;
273	umem->length     = size;
274	umem->address    = addr;
275	umem->page_shift = PAGE_SHIFT;
276	umem->writable   = 1;
277
278	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
279	if (!odp_data) {
280		ret = -ENOMEM;
281		goto out_umem;
282	}
283	odp_data->umem = umem;
284
285	mutex_init(&odp_data->umem_mutex);
286	init_completion(&odp_data->notifier_completion);
287
288	odp_data->page_list = vzalloc(pages * sizeof(*odp_data->page_list));
289	if (!odp_data->page_list) {
290		ret = -ENOMEM;
291		goto out_odp_data;
292	}
293
294	odp_data->dma_list = vzalloc(pages * sizeof(*odp_data->dma_list));
295	if (!odp_data->dma_list) {
296		ret = -ENOMEM;
297		goto out_page_list;
298	}
299
300	down_write(&context->umem_rwsem);
301	context->odp_mrs_count++;
302	rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
303	if (likely(!atomic_read(&context->notifier_count)))
304		odp_data->mn_counters_active = true;
305	else
306		list_add(&odp_data->no_private_counters,
307			 &context->no_private_counters);
308	up_write(&context->umem_rwsem);
309
310	umem->odp_data = odp_data;
311
312	return umem;
313
314out_page_list:
315	vfree(odp_data->page_list);
316out_odp_data:
317	kfree(odp_data);
318out_umem:
319	kfree(umem);
320	return ERR_PTR(ret);
321}
322EXPORT_SYMBOL(ib_alloc_odp_umem);
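/*
 * Illustrative sketch (not part of this file): in this older API a driver
 * implementing implicit ODP carves out a per-fault umem with
 * ib_alloc_odp_umem() and later frees it with ib_umem_odp_release(). The
 * example_* name and the 2MB granularity are assumptions.
 */
static struct ib_umem *example_alloc_fault_umem(struct ib_ucontext *context,
						unsigned long fault_addr)
{
	unsigned long start = ALIGN_DOWN(fault_addr, SZ_2M);

	return ib_alloc_odp_umem(context, start, SZ_2M);
}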
323
324int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
325		    int access)
326{
327	int ret_val;
328	struct pid *our_pid;
329	struct mm_struct *mm = get_task_mm(current);
330
331	if (!mm)
332		return -EINVAL;
333
334	if (access & IB_ACCESS_HUGETLB) {
335		struct vm_area_struct *vma;
336		struct hstate *h;
337
338		down_read(&mm->mmap_sem);
339		vma = find_vma(mm, ib_umem_start(umem));
340		if (!vma || !is_vm_hugetlb_page(vma)) {
341			up_read(&mm->mmap_sem);
342			return -EINVAL;
343		}
344		h = hstate_vma(vma);
345		umem->page_shift = huge_page_shift(h);
346		up_read(&mm->mmap_sem);
347		umem->hugetlb = 1;
348	} else {
349		umem->hugetlb = 0;
350	}
351
352	/* Prevent creating ODP MRs in child processes */
353	rcu_read_lock();
354	our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
355	rcu_read_unlock();
356	put_pid(our_pid);
357	if (context->tgid != our_pid) {
358		ret_val = -EINVAL;
359		goto out_mm;
360	}
361
362	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
363	if (!umem->odp_data) {
364		ret_val = -ENOMEM;
365		goto out_mm;
366	}
367	umem->odp_data->umem = umem;
368
369	mutex_init(&umem->odp_data->umem_mutex);
370
371	init_completion(&umem->odp_data->notifier_completion);
372
373	if (ib_umem_num_pages(umem)) {
374		umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
375					    sizeof(*umem->odp_data->page_list));
376		if (!umem->odp_data->page_list) {
377			ret_val = -ENOMEM;
378			goto out_odp_data;
379		}
380
381		umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
382					  sizeof(*umem->odp_data->dma_list));
383		if (!umem->odp_data->dma_list) {
384			ret_val = -ENOMEM;
385			goto out_page_list;
386		}
387	}
388
389	/*
390	 * When using MMU notifiers, we will get a
391	 * notification before the "current" task (and MM) is
392	 * destroyed. We use the umem_rwsem semaphore to synchronize.
393	 */
394	down_write(&context->umem_rwsem);
395	context->odp_mrs_count++;
396	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
397		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
398				   &context->umem_tree);
399	if (likely(!atomic_read(&context->notifier_count)) ||
400	    context->odp_mrs_count == 1)
401		umem->odp_data->mn_counters_active = true;
402	else
403		list_add(&umem->odp_data->no_private_counters,
404			 &context->no_private_counters);
405	downgrade_write(&context->umem_rwsem);
406
407	if (context->odp_mrs_count == 1) {
408		/*
409		 * Note that at this point, no MMU notifier is running
410		 * for this context!
411		 */
412		atomic_set(&context->notifier_count, 0);
413		INIT_HLIST_NODE(&context->mn.hlist);
414		context->mn.ops = &ib_umem_notifiers;
415		/*
416		 * Lock-dep detects a false positive for mmap_sem vs.
417		 * umem_rwsem, due to not grasping downgrade_write correctly.
418		 */
419		lockdep_off();
420		ret_val = mmu_notifier_register(&context->mn, mm);
421		lockdep_on();
422		if (ret_val) {
423			pr_err("Failed to register mmu_notifier %d\n", ret_val);
424			ret_val = -EBUSY;
425			goto out_mutex;
426		}
427	}
428
429	up_read(&context->umem_rwsem);
430
431	/*
432	 * Note that doing an mmput can cause a notifier for the relevant mm.
433	 * If the notifier is called while we hold the umem_rwsem, this will
434	 * cause a deadlock. Therefore, we release the reference only after we
435	 * released the semaphore.
436	 */
437	mmput(mm);
438	return 0;
439
440out_mutex:
441	up_read(&context->umem_rwsem);
442	vfree(umem->odp_data->dma_list);
443out_page_list:
444	vfree(umem->odp_data->page_list);
445out_odp_data:
446	kfree(umem->odp_data);
447out_mm:
448	mmput(mm);
449	return ret_val;
450}
451
452void ib_umem_odp_release(struct ib_umem *umem)
453{
454	struct ib_ucontext *context = umem->context;
455
456	/*
457	 * Ensure that no more pages are mapped in the umem.
458	 *
459	 * It is the driver's responsibility to ensure, before calling us,
460	 * that the hardware will not attempt to access the MR any more.
461	 */
462	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
463				    ib_umem_end(umem));
464
465	down_write(&context->umem_rwsem);
466	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
467		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
468				   &context->umem_tree);
469	context->odp_mrs_count--;
470	if (!umem->odp_data->mn_counters_active) {
471		list_del(&umem->odp_data->no_private_counters);
472		complete_all(&umem->odp_data->notifier_completion);
473	}
474
475	/*
476	 * Downgrade the lock to a read lock. This ensures that the notifiers
477	 * (who lock the mutex for reading) will be able to finish, and we
478	 * will be able to eventually obtain the mmu notifiers SRCU. Note
479	 * that since we are doing it atomically, no other user could register
480	 * and unregister while we do the check.
481	 */
482	downgrade_write(&context->umem_rwsem);
483	if (!context->odp_mrs_count) {
484		struct task_struct *owning_process = NULL;
485		struct mm_struct *owning_mm        = NULL;
486
487		owning_process = get_pid_task(context->tgid,
488					      PIDTYPE_PID);
489		if (owning_process == NULL)
490			/*
491			 * The process is already dead; the notifiers were
492			 * already removed.
493			 */
494			goto out;
495
496		owning_mm = get_task_mm(owning_process);
497		if (owning_mm == NULL)
498			/*
499			 * The process' mm is already dead; the notifiers
500			 * were already removed.
501			 */
502			goto out_put_task;
503		mmu_notifier_unregister(&context->mn, owning_mm);
504
505		mmput(owning_mm);
506
507out_put_task:
508		put_task_struct(owning_process);
509	}
510out:
511	up_read(&context->umem_rwsem);
512
513	vfree(umem->odp_data->dma_list);
514	vfree(umem->odp_data->page_list);
515	kfree(umem->odp_data);
516	kfree(umem);
517}
518
519/*
520 * Map for DMA and insert a single page into the on-demand paging page tables.
521 *
522 * @umem: the umem to insert the page to.
523 * @page_index: index in the umem to add the page to.
524 * @page: the page struct to map and add.
525 * @access_mask: access permissions needed for this page.
526 * @current_seq: sequence number for synchronization with invalidations.
527 *               the sequence number is taken from
528 *               umem->odp_data->notifiers_seq.
529 *
530 * The function returns -EFAULT if the DMA mapping operation fails. It returns
531 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
532 *
533 * The page is released via put_page even if the operation failed. For
534 * on-demand pinning, the page is released whenever it isn't stored in the
535 * umem.
536 */
537static int ib_umem_odp_map_dma_single_page(
538		struct ib_umem *umem,
539		int page_index,
540		struct page *page,
541		u64 access_mask,
542		unsigned long current_seq)
543{
544	struct ib_device *dev = umem->context->device;
545	dma_addr_t dma_addr;
546	int stored_page = 0;
547	int remove_existing_mapping = 0;
548	int ret = 0;
549
550	/*
551	 * Note: we avoid writing if seq is different from the initial seq, to
552	 * handle case of a racing notifier. This check also allows us to bail
553	 * handle the case of a racing notifier. This check also allows us to bail
554	 */
555	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
556		ret = -EAGAIN;
557		goto out;
558	}
559	if (!(umem->odp_data->dma_list[page_index])) {
560		dma_addr = ib_dma_map_page(dev,
561					   page,
562					   0, BIT(umem->page_shift),
563					   DMA_BIDIRECTIONAL);
564		if (ib_dma_mapping_error(dev, dma_addr)) {
565			ret = -EFAULT;
566			goto out;
567		}
568		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
569		umem->odp_data->page_list[page_index] = page;
570		umem->npages++;
571		stored_page = 1;
572	} else if (umem->odp_data->page_list[page_index] == page) {
573		umem->odp_data->dma_list[page_index] |= access_mask;
574	} else {
575		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
576		       umem->odp_data->page_list[page_index], page);
577		/* Better remove the mapping now, to prevent any further
578		 * damage. */
579		remove_existing_mapping = 1;
580	}
581
582out:
583	/* On Demand Paging - avoid pinning the page */
584	if (umem->context->invalidate_range || !stored_page)
585		put_page(page);
586
587	if (remove_existing_mapping && umem->context->invalidate_range) {
588		invalidate_page_trampoline(
589			umem,
590			ib_umem_start(umem) + (page_index >> umem->page_shift),
591			ib_umem_start(umem) + ((page_index + 1) >>
592					       umem->page_shift),
593			NULL);
594		ret = -EAGAIN;
595	}
596
597	return ret;
598}
599
600/**
601 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
602 *
603 * Pins the range of pages passed in the argument, and maps them to
604 * DMA addresses. The DMA addresses of the mapped pages are updated in
605 * umem->odp_data->dma_list.
606 *
607 * Returns the number of pages mapped on success, or a negative error code
608 * on failure.
609 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
610 * the function from completing its task.
611 * An -ENOENT error code indicates that the userspace process is being terminated
612 * and mm was already destroyed.
613 * @umem: the umem to map and pin
614 * @user_virt: the address from which we need to map.
615 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
616 *        bigger due to alignment, and may also be smaller in case of an error
617 *        pinning or mapping a page. The actual number of pages mapped is
618 *        returned in the return value.
619 * @access_mask: bit mask of the requested access permissions for the given
620 *               range.
621 * @current_seq: the MMU notifiers sequence value for synchronization with
622 *               invalidations. The sequence number is read from
623 *               umem->odp_data->notifiers_seq before calling this function
624 */
625int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
626			      u64 access_mask, unsigned long current_seq)
627{
628	struct task_struct *owning_process  = NULL;
629	struct mm_struct   *owning_mm       = NULL;
630	struct page       **local_page_list = NULL;
631	u64 page_mask, off;
632	int j, k, ret = 0, start_idx, npages = 0, page_shift;
633	unsigned int flags = 0;
634	phys_addr_t p = 0;
635
636	if (access_mask == 0)
637		return -EINVAL;
638
639	if (user_virt < ib_umem_start(umem) ||
640	    user_virt + bcnt > ib_umem_end(umem))
641		return -EFAULT;
642
643	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
644	if (!local_page_list)
645		return -ENOMEM;
646
647	page_shift = umem->page_shift;
648	page_mask = ~(BIT(page_shift) - 1);
649	off = user_virt & (~page_mask);
650	user_virt = user_virt & page_mask;
651	bcnt += off; /* Charge for the first page offset as well. */
652
653	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
654	if (owning_process == NULL) {
655		ret = -EINVAL;
656		goto out_no_task;
657	}
658
659	owning_mm = get_task_mm(owning_process);
660	if (owning_mm == NULL) {
661		ret = -ENOENT;
662		goto out_put_task;
663	}
664
665	if (access_mask & ODP_WRITE_ALLOWED_BIT)
666		flags |= FOLL_WRITE;
667
668	start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
669	k = start_idx;
670
671	while (bcnt > 0) {
672		const size_t gup_num_pages = min_t(size_t,
673				(bcnt + BIT(page_shift) - 1) >> page_shift,
674				PAGE_SIZE / sizeof(struct page *));
675
676		down_read(&owning_mm->mmap_sem);
677		/*
678		 * Note: this might result in redundant page getting. We can
679		 * avoid this by checking dma_list to be 0 before calling
680		 * get_user_pages. However, this makes the code much more
681		 * complex (and doesn't gain us much performance in most use
682		 * cases).
683		 */
684		npages = get_user_pages_remote(owning_process, owning_mm,
685				user_virt, gup_num_pages,
686				flags, local_page_list, NULL, NULL);
687		up_read(&owning_mm->mmap_sem);
688
689		if (npages < 0)
690			break;
691
692		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
693		mutex_lock(&umem->odp_data->umem_mutex);
694		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
695			if (user_virt & ~page_mask) {
696				p += PAGE_SIZE;
697				if (page_to_phys(local_page_list[j]) != p) {
698					ret = -EFAULT;
699					break;
700				}
701				put_page(local_page_list[j]);
702				continue;
703			}
704
705			ret = ib_umem_odp_map_dma_single_page(
706					umem, k, local_page_list[j],
707					access_mask, current_seq);
708			if (ret < 0)
709				break;
710
711			p = page_to_phys(local_page_list[j]);
712			k++;
713		}
714		mutex_unlock(&umem->odp_data->umem_mutex);
715
716		if (ret < 0) {
717			/* Release left over pages when handling errors. */
718			for (++j; j < npages; ++j)
719				put_page(local_page_list[j]);
720			break;
721		}
722	}
723
724	if (ret >= 0) {
725		if (npages < 0 && k == start_idx)
726			ret = npages;
727		else
728			ret = k - start_idx;
729	}
730
731	mmput(owning_mm);
732out_put_task:
733	put_task_struct(owning_process);
734out_no_task:
735	free_page((unsigned long)local_page_list);
736	return ret;
737}
738EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
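/*
 * Illustrative sketch (not part of this file): callers sample notifiers_seq
 * before mapping and re-check it while updating the device page table,
 * retrying the fault on -EAGAIN. The example_* name and the smp_rmb()
 * pairing follow the pattern used by drivers of this era; the device
 * page-table update itself is a placeholder.
 */
static int example_fault_pages(struct ib_umem *umem, u64 va, u64 len,
			       u64 access_mask)
{
	unsigned long current_seq;
	int npages;

	current_seq = READ_ONCE(umem->odp_data->notifiers_seq);
	/* make sure the seq is read before the pages are mapped */
	smp_rmb();

	npages = ib_umem_odp_map_dma_pages(umem, va, len, access_mask,
					   current_seq);
	if (npages == -EAGAIN)
		return npages;	/* a racing invalidation hit; retry the fault */

	/* ... update the device page table under umem_mutex, re-checking
	 * ib_umem_mmu_notifier_retry(umem, current_seq) ... */
	return npages;
}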
739
740void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
741				 u64 bound)
742{
743	int idx;
744	u64 addr;
745	struct ib_device *dev = umem->context->device;
746
747	virt  = max_t(u64, virt,  ib_umem_start(umem));
748	bound = min_t(u64, bound, ib_umem_end(umem));
749	/* Note that during the run of this function, the
750	 * notifiers_count of the MR is > 0, preventing any racing
751	 * faults from completing. We might be racing with other
752	 * invalidations, so we must make sure we free each page only
753	 * once. */
754	mutex_lock(&umem->odp_data->umem_mutex);
755	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
756		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
757		if (umem->odp_data->page_list[idx]) {
758			struct page *page = umem->odp_data->page_list[idx];
759			dma_addr_t dma = umem->odp_data->dma_list[idx];
760			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
761
762			WARN_ON(!dma_addr);
763
764			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
765					  DMA_BIDIRECTIONAL);
766			if (dma & ODP_WRITE_ALLOWED_BIT) {
767				struct page *head_page = compound_head(page);
768				/*
769				 * set_page_dirty prefers being called with
770				 * the page lock. However, MMU notifiers are
771				 * called sometimes with and sometimes without
772				 * the lock. We rely on the umem_mutex instead
773				 * to prevent other mmu notifiers from
774				 * continuing and allowing the page mapping to
775				 * be removed.
776				 */
777				set_page_dirty(head_page);
778			}
779			/* on demand pinning support */
780			if (!umem->context->invalidate_range)
781				put_page(page);
782			umem->odp_data->page_list[idx] = NULL;
783			umem->odp_data->dma_list[idx] = 0;
784			umem->npages--;
785		}
786	}
787	mutex_unlock(&umem->odp_data->umem_mutex);
788}
789EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
790
791/* @last is not a part of the interval. See comment for function
792 * node_last.
793 */
794int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
795				  u64 start, u64 last,
796				  umem_call_back cb,
797				  void *cookie)
798{
799	int ret_val = 0;
800	struct umem_odp_node *node, *next;
801	struct ib_umem_odp *umem;
802
803	if (unlikely(start == last))
804		return ret_val;
805
806	for (node = rbt_ib_umem_iter_first(root, start, last - 1);
807			node; node = next) {
808		next = rbt_ib_umem_iter_next(node, start, last - 1);
809		umem = container_of(node, struct ib_umem_odp, interval_tree);
810		ret_val = cb(umem->umem, start, last, cookie) || ret_val;
811	}
812
813	return ret_val;
814}
815EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
816
817struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
818				       u64 addr, u64 length)
819{
820	struct umem_odp_node *node;
821
822	node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
823	if (node)
824		return container_of(node, struct ib_umem_odp, interval_tree);
825	return NULL;
826
827}
828EXPORT_SYMBOL(rbt_ib_umem_lookup);