v6.8
  1/*
  2 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 */
 32
 33#include <linux/types.h>
 34#include <linux/sched.h>
 35#include <linux/sched/mm.h>
 36#include <linux/sched/task.h>
 37#include <linux/pid.h>
 38#include <linux/slab.h>
 39#include <linux/export.h>
 40#include <linux/vmalloc.h>
 41#include <linux/hugetlb.h>
 42#include <linux/interval_tree.h>
 43#include <linux/hmm.h>
 44#include <linux/pagemap.h>
 45
 46#include <rdma/ib_umem_odp.h>
 47
 48#include "uverbs.h"
 49
 50static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
 51				   const struct mmu_interval_notifier_ops *ops)
 52{
 53	int ret;
 54
 55	umem_odp->umem.is_odp = 1;
 56	mutex_init(&umem_odp->umem_mutex);
 57
 58	if (!umem_odp->is_implicit_odp) {
 59		size_t page_size = 1UL << umem_odp->page_shift;
 60		unsigned long start;
 61		unsigned long end;
 62		size_t ndmas, npfns;
 63
 64		start = ALIGN_DOWN(umem_odp->umem.address, page_size);
 65		if (check_add_overflow(umem_odp->umem.address,
 66				       (unsigned long)umem_odp->umem.length,
 67				       &end))
 68			return -EOVERFLOW;
 69		end = ALIGN(end, page_size);
 70		if (unlikely(end < page_size))
 71			return -EOVERFLOW;
 72
 73		ndmas = (end - start) >> umem_odp->page_shift;
 74		if (!ndmas)
 75			return -EINVAL;
 76
 77		npfns = (end - start) >> PAGE_SHIFT;
 78		umem_odp->pfn_list = kvcalloc(
 79			npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
 80		if (!umem_odp->pfn_list)
 81			return -ENOMEM;
 82
 83		umem_odp->dma_list = kvcalloc(
 84			ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
 85		if (!umem_odp->dma_list) {
 86			ret = -ENOMEM;
 87			goto out_pfn_list;
 88		}
 89
 90		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
 91						   umem_odp->umem.owning_mm,
 92						   start, end - start, ops);
 93		if (ret)
 94			goto out_dma_list;
 95	}
 96
 97	return 0;
 98
 99out_dma_list:
100	kvfree(umem_odp->dma_list);
101out_pfn_list:
102	kvfree(umem_odp->pfn_list);
103	return ret;
104}
105
106/**
107 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
108 *
109 * Implicit ODP umems do not have a VA range and do not have any page lists.
110 * They exist only to hold the per_mm reference to help the driver create
111 * children umems.
112 *
113 * @device: IB device to create UMEM
114 * @access: ib_reg_mr access flags
115 */
116struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
117					       int access)
118{
119	struct ib_umem *umem;
120	struct ib_umem_odp *umem_odp;
121	int ret;
122
123	if (access & IB_ACCESS_HUGETLB)
124		return ERR_PTR(-EINVAL);
125
126	umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
127	if (!umem_odp)
128		return ERR_PTR(-ENOMEM);
129	umem = &umem_odp->umem;
130	umem->ibdev = device;
131	umem->writable = ib_access_writable(access);
132	umem->owning_mm = current->mm;
133	umem_odp->is_implicit_odp = 1;
134	umem_odp->page_shift = PAGE_SHIFT;
135
136	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
137	ret = ib_init_umem_odp(umem_odp, NULL);
138	if (ret) {
139		put_pid(umem_odp->tgid);
140		kfree(umem_odp);
141		return ERR_PTR(ret);
142	}
143	return umem_odp;
144}
145EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
146
147/**
148 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
149 *                           parent ODP umem
150 *
151 * @root: The parent umem enclosing the child. This must be allocated using
152 *        ib_umem_odp_alloc_implicit()
153 * @addr: The starting userspace VA
154 * @size: The length of the userspace VA
155 * @ops: MMU interval ops, currently only @invalidate
156 */
157struct ib_umem_odp *
158ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
159			size_t size,
160			const struct mmu_interval_notifier_ops *ops)
161{
162	/*
163	 * Caller must ensure that root cannot be freed during the call to
164	 * ib_umem_odp_alloc_child().
165	 */
166	struct ib_umem_odp *odp_data;
167	struct ib_umem *umem;
168	int ret;
169
170	if (WARN_ON(!root->is_implicit_odp))
171		return ERR_PTR(-EINVAL);
172
173	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
174	if (!odp_data)
175		return ERR_PTR(-ENOMEM);
176	umem = &odp_data->umem;
177	umem->ibdev = root->umem.ibdev;
178	umem->length     = size;
179	umem->address    = addr;
180	umem->writable   = root->umem.writable;
181	umem->owning_mm  = root->umem.owning_mm;
182	odp_data->page_shift = PAGE_SHIFT;
183	odp_data->notifier.ops = ops;
184
185	/*
186	 * A mmget must be held when registering a notifier, the owning_mm only
187	 * has a mm_grab at this point.
188	 */
189	if (!mmget_not_zero(umem->owning_mm)) {
190		ret = -EFAULT;
191		goto out_free;
192	}
193
194	odp_data->tgid = get_pid(root->tgid);
195	ret = ib_init_umem_odp(odp_data, ops);
196	if (ret)
197		goto out_tgid;
198	mmput(umem->owning_mm);
199	return odp_data;
200
201out_tgid:
202	put_pid(odp_data->tgid);
203	mmput(umem->owning_mm);
204out_free:
205	kfree(odp_data);
206	return ERR_PTR(ret);
207}
208EXPORT_SYMBOL(ib_umem_odp_alloc_child);
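/*
 * Editor's sketch (illustrative, not part of this file): a driver that
 * supports implicit ODP might pair the two allocators roughly as below.
 * "my_mmu_interval_ops" is a hypothetical driver notifier-ops table and
 * error handling is abbreviated.
 *
 *	struct ib_umem_odp *root, *child;
 *
 *	root = ib_umem_odp_alloc_implicit(ibdev, access_flags);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *
 *	child = ib_umem_odp_alloc_child(root, fault_addr, fault_len,
 *					&my_mmu_interval_ops);
 *	if (IS_ERR(child)) {
 *		ib_umem_odp_release(root);
 *		return PTR_ERR(child);
 *	}
 */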
209
210/**
211 * ib_umem_odp_get - Create a umem_odp for a userspace va
212 *
213 * @device: IB device struct to get UMEM
214 * @addr: userspace virtual address to start at
215 * @size: length of region to pin
216 * @access: IB_ACCESS_xxx flags for memory being pinned
217 * @ops: MMU interval ops, currently only @invalidate
218 *
219 * The driver should use this when the access flags indicate ODP memory. It
220 * avoids pinning; instead, it stores the mm for future page fault handling
221 * in conjunction with MMU notifiers.
222 */
223struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
224				    unsigned long addr, size_t size, int access,
225				    const struct mmu_interval_notifier_ops *ops)
226{
227	struct ib_umem_odp *umem_odp;
228	int ret;
229
230	if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
231		return ERR_PTR(-EINVAL);
232
233	umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
234	if (!umem_odp)
235		return ERR_PTR(-ENOMEM);
236
237	umem_odp->umem.ibdev = device;
238	umem_odp->umem.length = size;
239	umem_odp->umem.address = addr;
240	umem_odp->umem.writable = ib_access_writable(access);
241	umem_odp->umem.owning_mm = current->mm;
242	umem_odp->notifier.ops = ops;
243
244	umem_odp->page_shift = PAGE_SHIFT;
245#ifdef CONFIG_HUGETLB_PAGE
246	if (access & IB_ACCESS_HUGETLB)
247		umem_odp->page_shift = HPAGE_SHIFT;
248#endif
249
250	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
251	ret = ib_init_umem_odp(umem_odp, ops);
252	if (ret)
253		goto err_put_pid;
254	return umem_odp;
255
256err_put_pid:
257	put_pid(umem_odp->tgid);
258	kfree(umem_odp);
259	return ERR_PTR(ret);
260}
261EXPORT_SYMBOL(ib_umem_odp_get);
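/*
 * Editor's sketch (illustrative, not part of this file): a driver's
 * reg_user_mr path might create an ODP umem when userspace requested
 * IB_ACCESS_ON_DEMAND; "mr" and "my_mmu_interval_ops" are hypothetical.
 *
 *	struct ib_umem_odp *odp;
 *
 *	odp = ib_umem_odp_get(ibdev, start, length, access_flags,
 *			      &my_mmu_interval_ops);
 *	if (IS_ERR(odp))
 *		return ERR_CAST(odp);
 *	mr->umem = &odp->umem;
 *
 *	... and on deregistration, after quiescing the hardware:
 *	ib_umem_odp_release(odp);
 */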
262
263void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
264{
265	/*
266	 * Ensure that no more pages are mapped in the umem.
267	 *
268	 * It is the driver's responsibility to ensure, before calling us,
269	 * that the hardware will not attempt to access the MR any more.
270	 */
271	if (!umem_odp->is_implicit_odp) {
272		mutex_lock(&umem_odp->umem_mutex);
273		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
274					    ib_umem_end(umem_odp));
275		mutex_unlock(&umem_odp->umem_mutex);
276		mmu_interval_notifier_remove(&umem_odp->notifier);
277		kvfree(umem_odp->dma_list);
278		kvfree(umem_odp->pfn_list);
279	}
280	put_pid(umem_odp->tgid);
281	kfree(umem_odp);
282}
283EXPORT_SYMBOL(ib_umem_odp_release);
284
285/*
286 * Map for DMA and insert a single page into the on-demand paging page tables.
287 *
288 * @umem_odp: the umem to insert the page to.
289 * @dma_index: index in the umem to add the dma to.
290 * @page: the page struct to map and add.
291 * @access_mask: access permissions needed for this page.
292 *
293 * The function returns -EFAULT if the DMA mapping operation fails.
294 *
295 */
296static int ib_umem_odp_map_dma_single_page(
297		struct ib_umem_odp *umem_odp,
298		unsigned int dma_index,
299		struct page *page,
300		u64 access_mask)
301{
302	struct ib_device *dev = umem_odp->umem.ibdev;
303	dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
304
305	if (*dma_addr) {
306		/*
307		 * If the page is already dma mapped it means it went through
308		 * a non-invalidating transition, like read-only to writable.
309		 * Resync the flags.
310		 */
311		*dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
312		return 0;
313	}
314
315	*dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
316				    DMA_BIDIRECTIONAL);
317	if (ib_dma_mapping_error(dev, *dma_addr)) {
318		*dma_addr = 0;
319		return -EFAULT;
320	}
321	umem_odp->npages++;
322	*dma_addr |= access_mask;
323	return 0;
324}
325
326/**
327 * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
328 *
329 * Maps the range passed in the argument to DMA addresses.
330 * The DMA addresses of the mapped pages are updated in umem_odp->dma_list.
331 * Upon success the ODP MR will be locked to let caller complete its device
332 * page table update.
333 *
334 * Returns the number of pages mapped on success, or a negative error
335 * code on failure.
336 * @umem_odp: the umem to map and pin
337 * @user_virt: the address from which we need to map.
338 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
339 *        bigger due to alignment, and may also be smaller in case of an error
340 *        pinning or mapping a page. The actual pages mapped is returned in
341 *        the return value.
342 * @access_mask: bit mask of the requested access permissions for the given
343 *               range.
344 * @fault: is faulting required for the given range
345 */
346int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
347				 u64 bcnt, u64 access_mask, bool fault)
348			__acquires(&umem_odp->umem_mutex)
349{
350	struct task_struct *owning_process  = NULL;
351	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
352	int pfn_index, dma_index, ret = 0, start_idx;
353	unsigned int page_shift, hmm_order, pfn_start_idx;
354	unsigned long num_pfns, current_seq;
355	struct hmm_range range = {};
356	unsigned long timeout;
357
358	if (access_mask == 0)
359		return -EINVAL;
360
361	if (user_virt < ib_umem_start(umem_odp) ||
362	    user_virt + bcnt > ib_umem_end(umem_odp))
363		return -EFAULT;
364
365	page_shift = umem_odp->page_shift;
366
367	/*
368	 * owning_process is allowed to be NULL; this means the mm has somehow
369	 * outlived the originating process. Presumably
370	 * mmget_not_zero will fail in this case.
371	 */
372	owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
373	if (!owning_process || !mmget_not_zero(owning_mm)) {
374		ret = -EINVAL;
375		goto out_put_task;
376	}
377
378	range.notifier = &umem_odp->notifier;
379	range.start = ALIGN_DOWN(user_virt, 1UL << page_shift);
380	range.end = ALIGN(user_virt + bcnt, 1UL << page_shift);
381	pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
382	num_pfns = (range.end - range.start) >> PAGE_SHIFT;
383	if (fault) {
384		range.default_flags = HMM_PFN_REQ_FAULT;
385
386		if (access_mask & ODP_WRITE_ALLOWED_BIT)
387			range.default_flags |= HMM_PFN_REQ_WRITE;
388	}
389
390	range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
391	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
392
393retry:
394	current_seq = range.notifier_seq =
395		mmu_interval_read_begin(&umem_odp->notifier);
396
397	mmap_read_lock(owning_mm);
398	ret = hmm_range_fault(&range);
399	mmap_read_unlock(owning_mm);
400	if (unlikely(ret)) {
401		if (ret == -EBUSY && !time_after(jiffies, timeout))
402			goto retry;
403		goto out_put_mm;
404	}
405
406	start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift;
407	dma_index = start_idx;
408
409	mutex_lock(&umem_odp->umem_mutex);
410	if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) {
411		mutex_unlock(&umem_odp->umem_mutex);
412		goto retry;
413	}
414
415	for (pfn_index = 0; pfn_index < num_pfns;
416		pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
417
418		if (fault) {
419			/*
420			 * Since we asked for hmm_range_fault() to populate
421			 * pages it shouldn't return an error entry on success.
422			 */
423			WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
424			WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
425		} else {
426			if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
427				WARN_ON(umem_odp->dma_list[dma_index]);
428				continue;
429			}
430			access_mask = ODP_READ_ALLOWED_BIT;
431			if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
432				access_mask |= ODP_WRITE_ALLOWED_BIT;
433		}
434
435		hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
436		/* If a hugepage was detected and ODP wasn't set for it, the umem
437		 * page_shift will be used; the opposite case is an error.
438		 */
439		if (hmm_order + PAGE_SHIFT < page_shift) {
440			ret = -EINVAL;
441			ibdev_dbg(umem_odp->umem.ibdev,
442				  "%s: unexpected hmm_order %u, page_shift %u\n",
443				  __func__, hmm_order, page_shift);
444			break;
445		}
446
447		ret = ib_umem_odp_map_dma_single_page(
448				umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
449				access_mask);
450		if (ret < 0) {
451			ibdev_dbg(umem_odp->umem.ibdev,
452				  "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
453			break;
454		}
455	}
456	/* upon success the lock stays held for the caller to complete its update */
457	if (!ret)
458		ret = dma_index - start_idx;
459	else
460		mutex_unlock(&umem_odp->umem_mutex);
461
462out_put_mm:
463	mmput_async(owning_mm);
464out_put_task:
465	if (owning_process)
466		put_task_struct(owning_process);
467	return ret;
468}
469EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
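/*
 * Editor's sketch (illustrative, not part of this file): a page-fault
 * handler might use ib_umem_odp_map_dma_and_lock() roughly as below,
 * remembering that on success the umem_mutex is returned locked.
 * update_device_page_table() is a hypothetical driver helper.
 *
 *	u64 access_mask = ODP_READ_ALLOWED_BIT;
 *	int npages;
 *
 *	if (odp->umem.writable)
 *		access_mask |= ODP_WRITE_ALLOWED_BIT;
 *
 *	npages = ib_umem_odp_map_dma_and_lock(odp, io_virt, bcnt,
 *					      access_mask, true);
 *	if (npages < 0)
 *		return npages;
 *
 *	err = update_device_page_table(odp, io_virt, npages);
 *	mutex_unlock(&odp->umem_mutex);
 */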
470
471void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
472				 u64 bound)
473{
474	dma_addr_t dma_addr;
475	dma_addr_t dma;
476	int idx;
477	u64 addr;
478	struct ib_device *dev = umem_odp->umem.ibdev;
479
480	lockdep_assert_held(&umem_odp->umem_mutex);
481
482	virt = max_t(u64, virt, ib_umem_start(umem_odp));
483	bound = min_t(u64, bound, ib_umem_end(umem_odp));
484	for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
485		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
486		dma = umem_odp->dma_list[idx];
487
488		/* The access flags guarantee a non-zero entry for any mapped DMA address */
489		if (dma) {
490			unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
491			struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
492
493			dma_addr = dma & ODP_DMA_ADDR_MASK;
494			ib_dma_unmap_page(dev, dma_addr,
495					  BIT(umem_odp->page_shift),
496					  DMA_BIDIRECTIONAL);
497			if (dma & ODP_WRITE_ALLOWED_BIT) {
498				struct page *head_page = compound_head(page);
499				/*
500				 * set_page_dirty prefers being called with
501				 * the page lock. However, MMU notifiers are
502				 * called sometimes with and sometimes without
503				 * the lock. We rely on the umem_mutex instead
504				 * to prevent other mmu notifiers from
505				 * continuing and allowing the page mapping to
506				 * be removed.
507				 */
508				set_page_dirty(head_page);
509			}
510			umem_odp->dma_list[idx] = 0;
511			umem_odp->npages--;
512		}
513	}
514}
515EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
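/*
 * Editor's sketch (illustrative, not part of this file): the @ops
 * invalidate callback registered through ib_umem_odp_get() or
 * ib_umem_odp_alloc_child() typically zaps the device page table and
 * then unmaps the DMA addresses, all under umem_mutex, roughly like:
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *mni,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		struct ib_umem_odp *odp =
 *			container_of(mni, struct ib_umem_odp, notifier);
 *		unsigned long start, end;
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;
 *
 *		mutex_lock(&odp->umem_mutex);
 *		mmu_interval_set_seq(mni, cur_seq);
 *		start = max_t(u64, ib_umem_start(odp), range->start);
 *		end = min_t(u64, ib_umem_end(odp), range->end);
 *
 *		... zap the hardware translations covering [start, end) ...
 *
 *		ib_umem_odp_unmap_dma_pages(odp, start, end);
 *		mutex_unlock(&odp->umem_mutex);
 *		return true;
 *	}
 */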
v4.10.11
  1/*
  2 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 */
 32
 33#include <linux/types.h>
 34#include <linux/sched.h>
 35#include <linux/pid.h>
 36#include <linux/slab.h>
 37#include <linux/export.h>
 38#include <linux/vmalloc.h>
 39
 40#include <rdma/ib_verbs.h>
 41#include <rdma/ib_umem.h>
 42#include <rdma/ib_umem_odp.h>
 43
 44static void ib_umem_notifier_start_account(struct ib_umem *item)
 45{
 46	mutex_lock(&item->odp_data->umem_mutex);
 47
 48	/* Only update private counters for this umem if it has them.
 49	 * Otherwise skip it. All page faults will be delayed for this umem. */
 50	if (item->odp_data->mn_counters_active) {
 51		int notifiers_count = item->odp_data->notifiers_count++;
 52
 53		if (notifiers_count == 0)
 54			/* Initialize the completion object for waiting on
 55			 * notifiers. Since notifier_count is zero, no one
 56			 * should be waiting right now. */
 57			reinit_completion(&item->odp_data->notifier_completion);
 58	}
 59	mutex_unlock(&item->odp_data->umem_mutex);
 60}
 61
 62static void ib_umem_notifier_end_account(struct ib_umem *item)
 63{
 64	mutex_lock(&item->odp_data->umem_mutex);
 65
 66	/* Only update private counters for this umem if it has them.
 67	 * Otherwise skip it. All page faults will be delayed for this umem. */
 68	if (item->odp_data->mn_counters_active) {
 69		/*
 70		 * This sequence increase will notify the QP page fault that
 71		 * the page that is going to be mapped in the spte could have
 72		 * been freed.
 73		 */
 74		++item->odp_data->notifiers_seq;
 75		if (--item->odp_data->notifiers_count == 0)
 76			complete_all(&item->odp_data->notifier_completion);
 77	}
 78	mutex_unlock(&item->odp_data->umem_mutex);
 79}
 80
 81/* Account for a new mmu notifier in an ib_ucontext. */
 82static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
 83{
 84	atomic_inc(&context->notifier_count);
 85}
 86
 87/* Account for a terminating mmu notifier in an ib_ucontext.
 88 *
 89 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 90 * the function takes the semaphore itself. */
 91static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
 92{
 93	int zero_notifiers = atomic_dec_and_test(&context->notifier_count);
 94
 95	if (zero_notifiers &&
 96	    !list_empty(&context->no_private_counters)) {
 97		/* No currently running mmu notifiers. Now is the chance to
 98		 * add private accounting to all previously added umems. */
 99		struct ib_umem_odp *odp_data, *next;
100
101		/* Prevent concurrent mmu notifiers from working on the
102		 * no_private_counters list. */
103		down_write(&context->umem_rwsem);
104
105		/* Read the notifier_count again, with the umem_rwsem
106		 * semaphore taken for write. */
107		if (!atomic_read(&context->notifier_count)) {
108			list_for_each_entry_safe(odp_data, next,
109						 &context->no_private_counters,
110						 no_private_counters) {
111				mutex_lock(&odp_data->umem_mutex);
112				odp_data->mn_counters_active = true;
113				list_del(&odp_data->no_private_counters);
114				complete_all(&odp_data->notifier_completion);
115				mutex_unlock(&odp_data->umem_mutex);
116			}
117		}
118
119		up_write(&context->umem_rwsem);
120	}
121}
122
123static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
124					       u64 end, void *cookie) {
125	/*
126	 * Increase the number of notifiers running, to
127	 * prevent any further fault handling on this MR.
128	 */
129	ib_umem_notifier_start_account(item);
130	item->odp_data->dying = 1;
131	/* Make sure that the fact the umem is dying is out before we release
132	 * all pending page faults. */
133	smp_wmb();
134	complete_all(&item->odp_data->notifier_completion);
135	item->context->invalidate_range(item, ib_umem_start(item),
136					ib_umem_end(item));
137	return 0;
138}
139
140static void ib_umem_notifier_release(struct mmu_notifier *mn,
141				     struct mm_struct *mm)
142{
143	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
144
145	if (!context->invalidate_range)
146		return;
147
148	ib_ucontext_notifier_start_account(context);
149	down_read(&context->umem_rwsem);
150	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
151				      ULLONG_MAX,
152				      ib_umem_notifier_release_trampoline,
153				      NULL);
154	up_read(&context->umem_rwsem);
155}
156
157static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
158				      u64 end, void *cookie)
159{
160	ib_umem_notifier_start_account(item);
161	item->context->invalidate_range(item, start, start + PAGE_SIZE);
162	ib_umem_notifier_end_account(item);
163	return 0;
164}
165
166static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
167					     struct mm_struct *mm,
168					     unsigned long address)
169{
170	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
171
172	if (!context->invalidate_range)
173		return;
174
175	ib_ucontext_notifier_start_account(context);
176	down_read(&context->umem_rwsem);
177	rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
178				      address + PAGE_SIZE,
179				      invalidate_page_trampoline, NULL);
180	up_read(&context->umem_rwsem);
181	ib_ucontext_notifier_end_account(context);
182}
183
184static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
185					     u64 end, void *cookie)
186{
187	ib_umem_notifier_start_account(item);
188	item->context->invalidate_range(item, start, end);
189	return 0;
190}
191
192static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
193						    struct mm_struct *mm,
194						    unsigned long start,
195						    unsigned long end)
196{
197	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
198
199	if (!context->invalidate_range)
200		return;
201
202	ib_ucontext_notifier_start_account(context);
203	down_read(&context->umem_rwsem);
204	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
205				      end,
206				      invalidate_range_start_trampoline, NULL);
207	up_read(&context->umem_rwsem);
208}
209
210static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
211					   u64 end, void *cookie)
212{
213	ib_umem_notifier_end_account(item);
214	return 0;
215}
216
217static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
218						  struct mm_struct *mm,
219						  unsigned long start,
220						  unsigned long end)
221{
222	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
223
224	if (!context->invalidate_range)
225		return;
226
227	down_read(&context->umem_rwsem);
228	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
229				      end,
230				      invalidate_range_end_trampoline, NULL);
231	up_read(&context->umem_rwsem);
232	ib_ucontext_notifier_end_account(context);
233}
234
235static const struct mmu_notifier_ops ib_umem_notifiers = {
236	.release                    = ib_umem_notifier_release,
237	.invalidate_page            = ib_umem_notifier_invalidate_page,
238	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
239	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
240};
241
242int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
243{
244	int ret_val;
245	struct pid *our_pid;
246	struct mm_struct *mm = get_task_mm(current);
247
248	if (!mm)
249		return -EINVAL;
250
251	/* Prevent creating ODP MRs in child processes */
252	rcu_read_lock();
253	our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
254	rcu_read_unlock();
255	put_pid(our_pid);
256	if (context->tgid != our_pid) {
257		ret_val = -EINVAL;
258		goto out_mm;
259	}
260
261	umem->hugetlb = 0;
262	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
263	if (!umem->odp_data) {
264		ret_val = -ENOMEM;
265		goto out_mm;
266	}
267	umem->odp_data->umem = umem;
268
269	mutex_init(&umem->odp_data->umem_mutex);
270
271	init_completion(&umem->odp_data->notifier_completion);
272
273	umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
274					    sizeof(*umem->odp_data->page_list));
275	if (!umem->odp_data->page_list) {
276		ret_val = -ENOMEM;
277		goto out_odp_data;
278	}
279
280	umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
281					  sizeof(*umem->odp_data->dma_list));
282	if (!umem->odp_data->dma_list) {
283		ret_val = -ENOMEM;
284		goto out_page_list;
285	}
286
287	/*
288	 * When using MMU notifiers, we will get a
289	 * notification before the "current" task (and MM) is
290	 * destroyed. We use the umem_rwsem semaphore to synchronize.
291	 */
292	down_write(&context->umem_rwsem);
293	context->odp_mrs_count++;
294	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
295		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
296				   &context->umem_tree);
297	if (likely(!atomic_read(&context->notifier_count)) ||
298	    context->odp_mrs_count == 1)
299		umem->odp_data->mn_counters_active = true;
300	else
301		list_add(&umem->odp_data->no_private_counters,
302			 &context->no_private_counters);
303	downgrade_write(&context->umem_rwsem);
304
305	if (context->odp_mrs_count == 1) {
306		/*
307		 * Note that at this point, no MMU notifier is running
308		 * for this context!
309		 */
310		atomic_set(&context->notifier_count, 0);
311		INIT_HLIST_NODE(&context->mn.hlist);
312		context->mn.ops = &ib_umem_notifiers;
313		/*
314		 * Lock-dep detects a false positive for mmap_sem vs.
315		 * umem_rwsem, due to not grasping downgrade_write correctly.
316		 */
317		lockdep_off();
318		ret_val = mmu_notifier_register(&context->mn, mm);
319		lockdep_on();
320		if (ret_val) {
321			pr_err("Failed to register mmu_notifier %d\n", ret_val);
322			ret_val = -EBUSY;
323			goto out_mutex;
324		}
325	}
326
327	up_read(&context->umem_rwsem);
328
329	/*
330	 * Note that doing an mmput can cause a notifier for the relevant mm.
331	 * If the notifier is called while we hold the umem_rwsem, this will
332	 * cause a deadlock. Therefore, we release the reference only after we
333	 * released the semaphore.
334	 */
335	mmput(mm);
336	return 0;
337
338out_mutex:
339	up_read(&context->umem_rwsem);
340	vfree(umem->odp_data->dma_list);
341out_page_list:
342	vfree(umem->odp_data->page_list);
343out_odp_data:
344	kfree(umem->odp_data);
345out_mm:
346	mmput(mm);
347	return ret_val;
348}
349
350void ib_umem_odp_release(struct ib_umem *umem)
351{
352	struct ib_ucontext *context = umem->context;
353
354	/*
355	 * Ensure that no more pages are mapped in the umem.
356	 *
357	 * It is the driver's responsibility to ensure, before calling us,
358	 * that the hardware will not attempt to access the MR any more.
359	 */
360	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
361				    ib_umem_end(umem));
362
363	down_write(&context->umem_rwsem);
364	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
365		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
366				   &context->umem_tree);
367	context->odp_mrs_count--;
368	if (!umem->odp_data->mn_counters_active) {
369		list_del(&umem->odp_data->no_private_counters);
370		complete_all(&umem->odp_data->notifier_completion);
371	}
372
373	/*
374	 * Downgrade the lock to a read lock. This ensures that the notifiers
375	 * (who lock the mutex for reading) will be able to finish, and we
376	 * will be able to eventually obtain the mmu notifiers SRCU. Note
377	 * that since we are doing it atomically, no other user could register
378	 * and unregister while we do the check.
379	 */
380	downgrade_write(&context->umem_rwsem);
381	if (!context->odp_mrs_count) {
382		struct task_struct *owning_process = NULL;
383		struct mm_struct *owning_mm        = NULL;
384
385		owning_process = get_pid_task(context->tgid,
386					      PIDTYPE_PID);
387		if (owning_process == NULL)
388			/*
389			 * The process is already dead, notifiers were removed
390			 * already.
391			 */
392			goto out;
393
394		owning_mm = get_task_mm(owning_process);
395		if (owning_mm == NULL)
396			/*
397			 * The process' mm is already dead, notifiers were
398			 * removed already.
399			 */
400			goto out_put_task;
401		mmu_notifier_unregister(&context->mn, owning_mm);
402
403		mmput(owning_mm);
404
405out_put_task:
406		put_task_struct(owning_process);
407	}
408out:
409	up_read(&context->umem_rwsem);
410
411	vfree(umem->odp_data->dma_list);
412	vfree(umem->odp_data->page_list);
413	kfree(umem->odp_data);
414	kfree(umem);
415}
416
417/*
418 * Map for DMA and insert a single page into the on-demand paging page tables.
419 *
420 * @umem: the umem to insert the page to.
421 * @page_index: index in the umem to add the page to.
422 * @page: the page struct to map and add.
423 * @access_mask: access permissions needed for this page.
424 * @current_seq: sequence number for synchronization with invalidations.
425 *               the sequence number is taken from
426 *               umem->odp_data->notifiers_seq.
427 *
428 * The function returns -EFAULT if the DMA mapping operation fails. It returns
429 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
430 *
431 * The page is released via put_page even if the operation failed. For
432 * on-demand pinning, the page is released whenever it isn't stored in the
433 * umem.
434 */
435static int ib_umem_odp_map_dma_single_page(
436		struct ib_umem *umem,
437		int page_index,
438		u64 base_virt_addr,
439		struct page *page,
440		u64 access_mask,
441		unsigned long current_seq)
442{
443	struct ib_device *dev = umem->context->device;
444	dma_addr_t dma_addr;
445	int stored_page = 0;
446	int remove_existing_mapping = 0;
447	int ret = 0;
448
449	/*
450	 * Note: we avoid writing if seq is different from the initial seq, to
451	 * handle the case of a racing notifier. This check also allows us to bail
452	 * early if we have a notifier running in parallel with us.
453	 */
454	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
455		ret = -EAGAIN;
456		goto out;
457	}
458	if (!(umem->odp_data->dma_list[page_index])) {
459		dma_addr = ib_dma_map_page(dev,
460					   page,
461					   0, PAGE_SIZE,
462					   DMA_BIDIRECTIONAL);
463		if (ib_dma_mapping_error(dev, dma_addr)) {
464			ret = -EFAULT;
465			goto out;
466		}
467		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
468		umem->odp_data->page_list[page_index] = page;
469		stored_page = 1;
470	} else if (umem->odp_data->page_list[page_index] == page) {
471		umem->odp_data->dma_list[page_index] |= access_mask;
472	} else {
473		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
474		       umem->odp_data->page_list[page_index], page);
475		/* Better remove the mapping now, to prevent any further
476		 * damage. */
477		remove_existing_mapping = 1;
478	}
479
480out:
481	/* On Demand Paging - avoid pinning the page */
482	if (umem->context->invalidate_range || !stored_page)
483		put_page(page);
484
485	if (remove_existing_mapping && umem->context->invalidate_range) {
486		invalidate_page_trampoline(
487			umem,
488			base_virt_addr + (page_index * PAGE_SIZE),
489			base_virt_addr + ((page_index+1)*PAGE_SIZE),
490			NULL);
491		ret = -EAGAIN;
492	}
493
494	return ret;
495}
496
497/**
498 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
499 *
500 * Pins the range of pages passed in the argument, and maps them to
501 * DMA addresses. The DMA addresses of the mapped pages are updated in
502 * umem->odp_data->dma_list.
503 *
504 * Returns the number of pages mapped on success, or a negative error
505 * code on failure.
506 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
507 * the function from completing its task.
508 *
509 * @umem: the umem to map and pin
510 * @user_virt: the address from which we need to map.
511 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
512 *        bigger due to alignment, and may also be smaller in case of an error
513 *        pinning or mapping a page. The actual pages mapped is returned in
514 *        the return value.
515 * @access_mask: bit mask of the requested access permissions for the given
516 *               range.
517 * @current_seq: the MMU notifiers sequence value for synchronization with
518 *               invalidations. The sequence number is read from
519 *               umem->odp_data->notifiers_seq before calling this function
520 */
521int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
522			      u64 access_mask, unsigned long current_seq)
523{
524	struct task_struct *owning_process  = NULL;
525	struct mm_struct   *owning_mm       = NULL;
526	struct page       **local_page_list = NULL;
527	u64 off;
528	int j, k, ret = 0, start_idx, npages = 0;
529	u64 base_virt_addr;
530	unsigned int flags = 0;
531
532	if (access_mask == 0)
533		return -EINVAL;
534
535	if (user_virt < ib_umem_start(umem) ||
536	    user_virt + bcnt > ib_umem_end(umem))
537		return -EFAULT;
538
539	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
540	if (!local_page_list)
541		return -ENOMEM;
542
543	off = user_virt & (~PAGE_MASK);
544	user_virt = user_virt & PAGE_MASK;
545	base_virt_addr = user_virt;
546	bcnt += off; /* Charge for the first page offset as well. */
547
548	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
549	if (owning_process == NULL) {
550		ret = -EINVAL;
551		goto out_no_task;
552	}
553
554	owning_mm = get_task_mm(owning_process);
555	if (owning_mm == NULL) {
556		ret = -EINVAL;
557		goto out_put_task;
558	}
559
560	if (access_mask & ODP_WRITE_ALLOWED_BIT)
561		flags |= FOLL_WRITE;
562
563	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
564	k = start_idx;
565
566	while (bcnt > 0) {
567		const size_t gup_num_pages =
568			min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
569			      PAGE_SIZE / sizeof(struct page *));
570
571		down_read(&owning_mm->mmap_sem);
572		/*
573		 * Note: this might result in redundant page getting. We can
574		 * avoid this by checking that dma_list is 0 before calling
575		 * get_user_pages. However, this makes the code much more
576		 * complex (and doesn't gain us much performance in most use
577		 * cases).
578		 */
579		npages = get_user_pages_remote(owning_process, owning_mm,
580				user_virt, gup_num_pages,
581				flags, local_page_list, NULL, NULL);
582		up_read(&owning_mm->mmap_sem);
583
584		if (npages < 0)
585			break;
586
587		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
588		user_virt += npages << PAGE_SHIFT;
589		mutex_lock(&umem->odp_data->umem_mutex);
590		for (j = 0; j < npages; ++j) {
591			ret = ib_umem_odp_map_dma_single_page(
592				umem, k, base_virt_addr, local_page_list[j],
593				access_mask, current_seq);
594			if (ret < 0)
595				break;
596			k++;
597		}
598		mutex_unlock(&umem->odp_data->umem_mutex);
599
600		if (ret < 0) {
601			/* Release left over pages when handling errors. */
602			for (++j; j < npages; ++j)
603				put_page(local_page_list[j]);
604			break;
605		}
606	}
607
608	if (ret >= 0) {
609		if (npages < 0 && k == start_idx)
610			ret = npages;
611		else
612			ret = k - start_idx;
613	}
614
615	mmput(owning_mm);
616out_put_task:
617	put_task_struct(owning_process);
618out_no_task:
619	free_page((unsigned long)local_page_list);
620	return ret;
621}
622EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
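/*
 * Editor's sketch (illustrative, not part of this file): with this older
 * API a fault handler typically sampled notifiers_seq first and retried
 * when a racing invalidation made the mapping return -EAGAIN, roughly:
 *
 *	unsigned long current_seq;
 *	int npages;
 *
 *	do {
 *		current_seq = READ_ONCE(umem->odp_data->notifiers_seq);
 *		npages = ib_umem_odp_map_dma_pages(umem, io_virt, bcnt,
 *						   access_mask, current_seq);
 *	} while (npages == -EAGAIN);
 */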
623
624void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
625				 u64 bound)
626{
627	int idx;
628	u64 addr;
629	struct ib_device *dev = umem->context->device;
630
631	virt  = max_t(u64, virt,  ib_umem_start(umem));
632	bound = min_t(u64, bound, ib_umem_end(umem));
633	/* Note that during the run of this function, the
634	 * notifiers_count of the MR is > 0, preventing any racing
635	 * faults from completing. We might be racing with other
636	 * invalidations, so we must make sure we free each page only
637	 * once. */
638	mutex_lock(&umem->odp_data->umem_mutex);
639	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
640		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
641		if (umem->odp_data->page_list[idx]) {
642			struct page *page = umem->odp_data->page_list[idx];
643			dma_addr_t dma = umem->odp_data->dma_list[idx];
644			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
645
646			WARN_ON(!dma_addr);
647
648			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
649					  DMA_BIDIRECTIONAL);
650			if (dma & ODP_WRITE_ALLOWED_BIT) {
651				struct page *head_page = compound_head(page);
652				/*
653				 * set_page_dirty prefers being called with
654				 * the page lock. However, MMU notifiers are
655				 * called sometimes with and sometimes without
656				 * the lock. We rely on the umem_mutex instead
657				 * to prevent other mmu notifiers from
658				 * continuing and allowing the page mapping to
659				 * be removed.
660				 */
661				set_page_dirty(head_page);
662			}
663			/* on demand pinning support */
664			if (!umem->context->invalidate_range)
665				put_page(page);
666			umem->odp_data->page_list[idx] = NULL;
667			umem->odp_data->dma_list[idx] = 0;
668		}
669	}
670	mutex_unlock(&umem->odp_data->umem_mutex);
671}
672EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);