/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 *
 * Based on amdgpu_mn, which bears the following notice:
 *
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_userptr.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER

/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the interval notifier for the range (mm) that is about to change
 * @range: details on the invalidation
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	long r;

	if (!mmu_notifier_range_blockable(range))
		return false;

	write_lock(&i915->mm.notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	write_unlock(&i915->mm.notifier_lock);

	/*
	 * We don't wait when the process is exiting. This is valid
	 * because the object will be cleaned up anyway.
	 *
	 * This is also temporarily required as a hack, because we
	 * cannot currently force non-consistent batch buffers to preempt
	 * and reschedule by waiting on them, which would hang processes
	 * on exit.
	 */
	if (current->flags & PF_EXITING)
		return true;

	/* we will unbind on next submission, still have userptr pins */
	r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
				  MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

	return true;
}

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}

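/*
 * Drop one userptr page reference; on the final put, unpin and free the
 * cached page array.
 */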
static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}

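/*
 * Build an sg_table from the user pages already pinned by submit_init and
 * hand it to the object, taking an extra page_ref so the page array outlives
 * the sg_table. Falls back to PAGE_SIZE segments if DMA mapping of larger
 * segments fails.
 */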
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
	struct sg_table *st;
	struct page **pvec;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}

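/*
 * Tear down the sg_table built by get_pages: mark the backing pages dirty
 * (unless the object is read-only) and accessed, free the table, then drop
 * the page reference taken in get_pages.
 */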
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_folio, that is already holding the lock
			 * on the folio. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively trying
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}

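/*
 * Unbind the object from the GTT and release its current page array so that
 * a fresh set of user pages can be installed on the next submission.
 */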
static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}

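/*
 * Called before submission: (re)pin the user pages with
 * pin_user_pages_fast() and install them on the object, first dropping any
 * stale pages if the mmu notifier sequence has moved on. Returns -EAGAIN if
 * an invalidation raced with us and the caller must retry.
 */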
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

	if (mmu_interval_read_retry(&obj->userptr.notifier,
		!obj->userptr.page_ref ? notifier_seq :
		obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}

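/*
 * Called after the request has been queued: verify that no mmu notifier
 * invalidation raced with submit_init; if one did, report -EAGAIN so the
 * whole submission is retried.
 */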
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
		/* We collided with the mmu notifier, need to retry */

		return -EAGAIN;
	}

	return 0;
}

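/*
 * Check that the userptr range can currently be pinned and populated:
 * acquire the pages and immediately release them again, ignoring any race
 * with the mmu notifier since the pages are not kept.
 */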
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
		/*
		 * Since we only check validity and do not use the pages,
		 * it doesn't matter if we collide with the mmu notifier,
		 * and -EAGAIN handling is not required.
		 */
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif

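/*
 * Walk the VMAs covering [addr, addr + len) and fail with -EFAULT if the
 * range contains holes or PFN/mixed mappings that cannot back a userptr
 * object.
 */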
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, end) {
		/* Check for holes, note that we also update the addr below */
		if (vma->vm_start > addr)
			break;

		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

	if (vma || addr < end)
		return -EFAULT;
	return 0;
}

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
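/*
 * Illustrative userspace call sequence (a hedged sketch only; the variable
 * names and the zero flags value are assumptions, not part of this file):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,	// page-aligned allocation
 *		.user_size = size,			// multiple of the page size
 *		.flags = 0,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		handle = arg.handle;			// GEM handle wrapping ptr
 */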
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw that
		 * lacks an LLC and has broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!to_gt(dev_priv)->vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Check that the range pointed to represents real struct
		 * pages and not iomappings (at this moment in time!)
		 */
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
	rwlock_init(&dev_priv->mm.notifier_lock);
#endif

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
}