v3.1: drivers/gpu/drm/ttm/ttm_bo_vm.c
 
  1/**************************************************************************
  2 *
  3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27/*
 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 29 */
 30
 31#include <ttm/ttm_module.h>
 32#include <ttm/ttm_bo_driver.h>
 33#include <ttm/ttm_placement.h>
 34#include <linux/mm.h>
 35#include <linux/rbtree.h>
 36#include <linux/module.h>
 37#include <linux/uaccess.h>
 38
 39#define TTM_BO_VM_NUM_PREFAULT 16
 40
 41static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
 42						     unsigned long page_start,
 43						     unsigned long num_pages)
 44{
 45	struct rb_node *cur = bdev->addr_space_rb.rb_node;
 46	unsigned long cur_offset;
 47	struct ttm_buffer_object *bo;
 48	struct ttm_buffer_object *best_bo = NULL;
 49
 50	while (likely(cur != NULL)) {
 51		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
 52		cur_offset = bo->vm_node->start;
 53		if (page_start >= cur_offset) {
 54			cur = cur->rb_right;
 55			best_bo = bo;
 56			if (page_start == cur_offset)
 57				break;
 58		} else
 59			cur = cur->rb_left;
 60	}
 61
 62	if (unlikely(best_bo == NULL))
 63		return NULL;
 64
 65	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
 66		     (page_start + num_pages)))
 67		return NULL;
 68
 69	return best_bo;
 70}
 71
 72static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 73{
 74	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
 75	    vma->vm_private_data;
 76	struct ttm_bo_device *bdev = bo->bdev;
 77	unsigned long page_offset;
 78	unsigned long page_last;
 79	unsigned long pfn;
 80	struct ttm_tt *ttm = NULL;
 81	struct page *page;
 82	int ret;
 83	int i;
 84	unsigned long address = (unsigned long)vmf->virtual_address;
 85	int retval = VM_FAULT_NOPAGE;
 86	struct ttm_mem_type_manager *man =
 87		&bdev->man[bo->mem.mem_type];
 88
 89	/*
 90	 * Work around locking order reversal in fault / nopfn
 91	 * between mmap_sem and bo_reserve: Perform a trylock operation
 92	 * for reserve, and if it fails, retry the fault after scheduling.
 93	 */
 94
 95	ret = ttm_bo_reserve(bo, true, true, false, 0);
 96	if (unlikely(ret != 0)) {
 97		if (ret == -EBUSY)
 98			set_need_resched();
 99		return VM_FAULT_NOPAGE;
100	}
101
102	if (bdev->driver->fault_reserve_notify) {
103		ret = bdev->driver->fault_reserve_notify(bo);
104		switch (ret) {
105		case 0:
106			break;
107		case -EBUSY:
108			set_need_resched();
109		case -ERESTARTSYS:
110			retval = VM_FAULT_NOPAGE;
111			goto out_unlock;
112		default:
113			retval = VM_FAULT_SIGBUS;
114			goto out_unlock;
115		}
116	}
117
118	/*
119	 * Wait for buffer data in transit, due to a pipelined
120	 * move.
121	 */
122
123	spin_lock(&bdev->fence_lock);
124	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
125		ret = ttm_bo_wait(bo, false, true, false);
126		spin_unlock(&bdev->fence_lock);
127		if (unlikely(ret != 0)) {
128			retval = (ret != -ERESTARTSYS) ?
129			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
130			goto out_unlock;
131		}
132	} else
133		spin_unlock(&bdev->fence_lock);
134
135	ret = ttm_mem_io_lock(man, true);
136	if (unlikely(ret != 0)) {
137		retval = VM_FAULT_NOPAGE;
138		goto out_unlock;
139	}
140	ret = ttm_mem_io_reserve_vm(bo);
141	if (unlikely(ret != 0)) {
142		retval = VM_FAULT_SIGBUS;
143		goto out_io_unlock;
144	}
145
146	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
147	    bo->vm_node->start - vma->vm_pgoff;
148	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
149	    bo->vm_node->start - vma->vm_pgoff;
150
151	if (unlikely(page_offset >= bo->num_pages)) {
152		retval = VM_FAULT_SIGBUS;
153		goto out_io_unlock;
154	}
155
156	/*
157	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
158	 * since the mmap_sem is only held in read mode. However, we
159	 * modify only the caching bits of vma->vm_page_prot and
160	 * consider those bits protected by
161	 * the bo->mutex, as we should be the only writers.
162	 * There shouldn't really be any readers of these bits except
163	 * within vm_insert_mixed()? fork?
164	 *
165	 * TODO: Add a list of vmas to the bo, and change the
166	 * vma->vm_page_prot when the object changes caching policy, with
167	 * the correct locks held.
168	 */
169	if (bo->mem.bus.is_iomem) {
170		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
171						vma->vm_page_prot);
172	} else {
173		ttm = bo->ttm;
174		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
175		    vm_get_page_prot(vma->vm_flags) :
176		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
177	}
178
179	/*
180	 * Speculatively prefault a number of pages. Only error on
181	 * first page.
182	 */
183
184	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
185		if (bo->mem.bus.is_iomem)
186			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
187		else {
188			page = ttm_tt_get_page(ttm, page_offset);
189			if (unlikely(!page && i == 0)) {
190				retval = VM_FAULT_OOM;
191				goto out_io_unlock;
192			} else if (unlikely(!page)) {
193				break;
194			}
195			pfn = page_to_pfn(page);
196		}
197
198		ret = vm_insert_mixed(vma, address, pfn);
199		/*
200		 * Somebody beat us to this PTE or prefaulting to
201		 * an already populated PTE, or prefaulting error.
202		 */
203
204		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
205			break;
206		else if (unlikely(ret != 0)) {
207			retval =
208			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
209			goto out_io_unlock;
210		}
211
212		address += PAGE_SIZE;
213		if (unlikely(++page_offset >= page_last))
214			break;
215	}
216out_io_unlock:
217	ttm_mem_io_unlock(man);
218out_unlock:
219	ttm_bo_unreserve(bo);
220	return retval;
221}
222
223static void ttm_bo_vm_open(struct vm_area_struct *vma)
224{
225	struct ttm_buffer_object *bo =
226	    (struct ttm_buffer_object *)vma->vm_private_data;
227
228	(void)ttm_bo_reference(bo);
229}
230
231static void ttm_bo_vm_close(struct vm_area_struct *vma)
232{
233	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
234
235	ttm_bo_unref(&bo);
236	vma->vm_private_data = NULL;
237}
238
239static const struct vm_operations_struct ttm_bo_vm_ops = {
240	.fault = ttm_bo_vm_fault,
241	.open = ttm_bo_vm_open,
242	.close = ttm_bo_vm_close
243};
244
245int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
246		struct ttm_bo_device *bdev)
247{
248	struct ttm_bo_driver *driver;
249	struct ttm_buffer_object *bo;
250	int ret;
251
252	read_lock(&bdev->vm_lock);
253	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
254				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
255	if (likely(bo != NULL))
256		ttm_bo_reference(bo);
257	read_unlock(&bdev->vm_lock);
258
259	if (unlikely(bo == NULL)) {
260		printk(KERN_ERR TTM_PFX
261		       "Could not find buffer object to map.\n");
262		return -EINVAL;
263	}
264
265	driver = bo->bdev->driver;
266	if (unlikely(!driver->verify_access)) {
267		ret = -EPERM;
268		goto out_unref;
269	}
270	ret = driver->verify_access(bo, filp);
271	if (unlikely(ret != 0))
272		goto out_unref;
273
274	vma->vm_ops = &ttm_bo_vm_ops;
275
276	/*
277	 * Note: We're transferring the bo reference to
278	 * vma->vm_private_data here.
279	 */
280
281	vma->vm_private_data = bo;
282	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
283	return 0;
284out_unref:
285	ttm_bo_unref(&bo);
286	return ret;
287}
288EXPORT_SYMBOL(ttm_bo_mmap);
289
290int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
291{
292	if (vma->vm_pgoff != 0)
293		return -EACCES;
294
295	vma->vm_ops = &ttm_bo_vm_ops;
296	vma->vm_private_data = ttm_bo_reference(bo);
297	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
298	return 0;
299}
300EXPORT_SYMBOL(ttm_fbdev_mmap);
301
302
303ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
304		  const char __user *wbuf, char __user *rbuf, size_t count,
305		  loff_t *f_pos, bool write)
306{
307	struct ttm_buffer_object *bo;
308	struct ttm_bo_driver *driver;
309	struct ttm_bo_kmap_obj map;
310	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
311	unsigned long kmap_offset;
312	unsigned long kmap_end;
313	unsigned long kmap_num;
314	size_t io_size;
315	unsigned int page_offset;
316	char *virtual;
317	int ret;
318	bool no_wait = false;
319	bool dummy;
320
321	read_lock(&bdev->vm_lock);
322	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
323	if (likely(bo != NULL))
324		ttm_bo_reference(bo);
325	read_unlock(&bdev->vm_lock);
326
327	if (unlikely(bo == NULL))
328		return -EFAULT;
329
330	driver = bo->bdev->driver;
331	if (unlikely(!driver->verify_access)) {
332		ret = -EPERM;
333		goto out_unref;
334	}
335
336	ret = driver->verify_access(bo, filp);
337	if (unlikely(ret != 0))
338		goto out_unref;
339
340	kmap_offset = dev_offset - bo->vm_node->start;
341	if (unlikely(kmap_offset >= bo->num_pages)) {
342		ret = -EFBIG;
343		goto out_unref;
344	}
345
346	page_offset = *f_pos & ~PAGE_MASK;
347	io_size = bo->num_pages - kmap_offset;
348	io_size = (io_size << PAGE_SHIFT) - page_offset;
349	if (count < io_size)
350		io_size = count;
351
352	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
353	kmap_num = kmap_end - kmap_offset + 1;
354
355	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
356
357	switch (ret) {
358	case 0:
359		break;
360	case -EBUSY:
361		ret = -EAGAIN;
362		goto out_unref;
363	default:
364		goto out_unref;
365	}
366
367	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
368	if (unlikely(ret != 0)) {
369		ttm_bo_unreserve(bo);
370		goto out_unref;
371	}
372
373	virtual = ttm_kmap_obj_virtual(&map, &dummy);
374	virtual += page_offset;
375
376	if (write)
377		ret = copy_from_user(virtual, wbuf, io_size);
378	else
379		ret = copy_to_user(rbuf, virtual, io_size);
380
381	ttm_bo_kunmap(&map);
382	ttm_bo_unreserve(bo);
383	ttm_bo_unref(&bo);
384
385	if (unlikely(ret != 0))
386		return -EFBIG;
387
388	*f_pos += io_size;
389
390	return io_size;
391out_unref:
392	ttm_bo_unref(&bo);
393	return ret;
394}
395
396ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
397			char __user *rbuf, size_t count, loff_t *f_pos,
398			bool write)
399{
400	struct ttm_bo_kmap_obj map;
401	unsigned long kmap_offset;
402	unsigned long kmap_end;
403	unsigned long kmap_num;
404	size_t io_size;
405	unsigned int page_offset;
406	char *virtual;
407	int ret;
408	bool no_wait = false;
409	bool dummy;
410
411	kmap_offset = (*f_pos >> PAGE_SHIFT);
412	if (unlikely(kmap_offset >= bo->num_pages))
413		return -EFBIG;
414
415	page_offset = *f_pos & ~PAGE_MASK;
416	io_size = bo->num_pages - kmap_offset;
417	io_size = (io_size << PAGE_SHIFT) - page_offset;
418	if (count < io_size)
419		io_size = count;
420
421	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
422	kmap_num = kmap_end - kmap_offset + 1;
423
424	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
425
426	switch (ret) {
427	case 0:
428		break;
429	case -EBUSY:
430		return -EAGAIN;
431	default:
432		return ret;
433	}
434
435	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
436	if (unlikely(ret != 0)) {
437		ttm_bo_unreserve(bo);
438		return ret;
439	}
440
441	virtual = ttm_kmap_obj_virtual(&map, &dummy);
442	virtual += page_offset;
443
444	if (write)
445		ret = copy_from_user(virtual, wbuf, io_size);
446	else
447		ret = copy_to_user(rbuf, virtual, io_size);
448
449	ttm_bo_kunmap(&map);
450	ttm_bo_unreserve(bo);
451	ttm_bo_unref(&bo);
452
453	if (unlikely(ret != 0))
454		return ret;
455
456	*f_pos += io_size;
457
458	return io_size;
459}
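
The two exported entry points above, ttm_bo_mmap() and ttm_fbdev_mmap(), are meant to be called from a driver's own mmap hooks. A minimal sketch of that wiring under the v3.1-era API follows; the mydrv_* names and the bdev field layout are hypothetical and not part of this file.

/* Illustrative driver glue only; mydrv_device and its bdev member are assumptions. */
static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct mydrv_device *mdev = file_priv->minor->dev->dev_private;

	/* ttm_bo_mmap() looks the bo up by vma->vm_pgoff, calls the driver's
	 * verify_access() hook, installs ttm_bo_vm_ops and leaves a bo
	 * reference in vma->vm_private_data until ttm_bo_vm_close() drops it. */
	return ttm_bo_mmap(filp, vma, &mdev->bdev);
}
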
v5.4: drivers/gpu/drm/ttm/ttm_bo_vm.c
  1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
  2/**************************************************************************
  3 *
  4 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
  5 * All Rights Reserved.
  6 *
  7 * Permission is hereby granted, free of charge, to any person obtaining a
  8 * copy of this software and associated documentation files (the
  9 * "Software"), to deal in the Software without restriction, including
 10 * without limitation the rights to use, copy, modify, merge, publish,
 11 * distribute, sub license, and/or sell copies of the Software, and to
 12 * permit persons to whom the Software is furnished to do so, subject to
 13 * the following conditions:
 14 *
 15 * The above copyright notice and this permission notice (including the
 16 * next paragraph) shall be included in all copies or substantial portions
 17 * of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 26 *
 27 **************************************************************************/
 28/*
 29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 30 */
 31
 32#define pr_fmt(fmt) "[TTM] " fmt
 33
 34#include <drm/ttm/ttm_module.h>
 35#include <drm/ttm/ttm_bo_driver.h>
 36#include <drm/ttm/ttm_placement.h>
 37#include <drm/drm_vma_manager.h>
 38#include <linux/mm.h>
 39#include <linux/pfn_t.h>
 40#include <linux/rbtree.h>
 41#include <linux/module.h>
 42#include <linux/uaccess.h>
 43#include <linux/mem_encrypt.h>
 44
 45#define TTM_BO_VM_NUM_PREFAULT 16
 46
 47static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 48				struct vm_fault *vmf)
 49{
 50	vm_fault_t ret = 0;
 51	int err = 0;
 52
 53	if (likely(!bo->moving))
 54		goto out_unlock;
 55
 56	/*
 57	 * Quick non-stalling check for idle.
 58	 */
 59	if (dma_fence_is_signaled(bo->moving))
 60		goto out_clear;
 61
 62	/*
 63	 * If possible, avoid waiting for GPU with mmap_sem
 64	 * held.
 65	 */
 66	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 67		ret = VM_FAULT_RETRY;
 68		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
 69			goto out_unlock;
 70
 71		ttm_bo_get(bo);
 72		up_read(&vmf->vma->vm_mm->mmap_sem);
 73		(void) dma_fence_wait(bo->moving, true);
 74		dma_resv_unlock(bo->base.resv);
 75		ttm_bo_put(bo);
 76		goto out_unlock;
 77	}
 78
 79	/*
 80	 * Ordinary wait.
 81	 */
 82	err = dma_fence_wait(bo->moving, true);
 83	if (unlikely(err != 0)) {
 84		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
 85			VM_FAULT_NOPAGE;
 86		goto out_unlock;
 87	}
 88
 89out_clear:
 90	dma_fence_put(bo->moving);
 91	bo->moving = NULL;
 92
 93out_unlock:
 94	return ret;
 95}
 96
 97static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
 98				       unsigned long page_offset)
 99{
100	struct ttm_bo_device *bdev = bo->bdev;
101
102	if (bdev->driver->io_mem_pfn)
103		return bdev->driver->io_mem_pfn(bo, page_offset);
104
105	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
106		+ page_offset;
107}
108
109static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
110{
111	struct vm_area_struct *vma = vmf->vma;
112	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
113	    vma->vm_private_data;
114	struct ttm_bo_device *bdev = bo->bdev;
115	unsigned long page_offset;
116	unsigned long page_last;
117	unsigned long pfn;
118	struct ttm_tt *ttm = NULL;
119	struct page *page;
120	int err;
121	int i;
122	vm_fault_t ret = VM_FAULT_NOPAGE;
123	unsigned long address = vmf->address;
124	struct ttm_mem_type_manager *man =
125		&bdev->man[bo->mem.mem_type];
126	struct vm_area_struct cvma;
127
128	/*
129	 * Work around locking order reversal in fault / nopfn
130	 * between mmap_sem and bo_reserve: Perform a trylock operation
131	 * for reserve, and if it fails, retry the fault after waiting
132	 * for the buffer to become unreserved.
133	 */
134	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
135		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
136			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
137				ttm_bo_get(bo);
138				up_read(&vmf->vma->vm_mm->mmap_sem);
139				(void) ttm_bo_wait_unreserved(bo);
140				ttm_bo_put(bo);
141			}
142
143			return VM_FAULT_RETRY;
144		}
145
146		/*
147		 * If we'd want to change locking order to
148		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
149		 * instead of retrying the fault...
150		 */
151		return VM_FAULT_NOPAGE;
152	}
153
154	/*
155	 * Refuse to fault imported pages. This should be handled
156	 * (if at all) by redirecting mmap to the exporter.
157	 */
158	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
159		ret = VM_FAULT_SIGBUS;
160		goto out_unlock;
161	}
162
163	if (bdev->driver->fault_reserve_notify) {
164		struct dma_fence *moving = dma_fence_get(bo->moving);
165
166		err = bdev->driver->fault_reserve_notify(bo);
167		switch (err) {
168		case 0:
169			break;
170		case -EBUSY:
171		case -ERESTARTSYS:
172			ret = VM_FAULT_NOPAGE;
173			goto out_unlock;
174		default:
175			ret = VM_FAULT_SIGBUS;
176			goto out_unlock;
177		}
178
179		if (bo->moving != moving) {
180			spin_lock(&bdev->glob->lru_lock);
181			ttm_bo_move_to_lru_tail(bo, NULL);
182			spin_unlock(&bdev->glob->lru_lock);
183		}
184		dma_fence_put(moving);
185	}
186
187	/*
188	 * Wait for buffer data in transit, due to a pipelined
189	 * move.
190	 */
191	ret = ttm_bo_vm_fault_idle(bo, vmf);
192	if (unlikely(ret != 0)) {
193		if (ret == VM_FAULT_RETRY &&
194		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
195			/* The BO has already been unreserved. */
196			return ret;
197		}
198
199		goto out_unlock;
200	}
201
202	err = ttm_mem_io_lock(man, true);
203	if (unlikely(err != 0)) {
204		ret = VM_FAULT_NOPAGE;
205		goto out_unlock;
206	}
207	err = ttm_mem_io_reserve_vm(bo);
208	if (unlikely(err != 0)) {
209		ret = VM_FAULT_SIGBUS;
210		goto out_io_unlock;
211	}
212
213	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
214		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
215	page_last = vma_pages(vma) + vma->vm_pgoff -
216		drm_vma_node_start(&bo->base.vma_node);
217
218	if (unlikely(page_offset >= bo->num_pages)) {
219		ret = VM_FAULT_SIGBUS;
220		goto out_io_unlock;
221	}
222
223	/*
224	 * Make a local vma copy to modify the page_prot member
225	 * and vm_flags if necessary. The vma parameter is protected
226	 * by mmap_sem in write mode.
227	 */
228	cvma = *vma;
229	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
230
231	if (bo->mem.bus.is_iomem) {
232		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
233						cvma.vm_page_prot);
234	} else {
235		struct ttm_operation_ctx ctx = {
236			.interruptible = false,
237			.no_wait_gpu = false,
238			.flags = TTM_OPT_FLAG_FORCE_ALLOC
239
240		};
241
242		ttm = bo->ttm;
243		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
244						cvma.vm_page_prot);
245
246		/* Allocate all page at once, most common usage */
247		if (ttm_tt_populate(ttm, &ctx)) {
248			ret = VM_FAULT_OOM;
249			goto out_io_unlock;
250		}
251	}
252
253	/*
254	 * Speculatively prefault a number of pages. Only error on
255	 * first page.
256	 */
257	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
258		if (bo->mem.bus.is_iomem) {
259			/* Iomem should not be marked encrypted */
260			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
261			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
262		} else {
263			page = ttm->pages[page_offset];
264			if (unlikely(!page && i == 0)) {
265				ret = VM_FAULT_OOM;
266				goto out_io_unlock;
267			} else if (unlikely(!page)) {
268				break;
269			}
270			page->index = drm_vma_node_start(&bo->base.vma_node) +
271				page_offset;
272			pfn = page_to_pfn(page);
273		}
274
275		if (vma->vm_flags & VM_MIXEDMAP)
276			ret = vmf_insert_mixed(&cvma, address,
277					__pfn_to_pfn_t(pfn, PFN_DEV));
278		else
279			ret = vmf_insert_pfn(&cvma, address, pfn);
280
281		/* Never error on prefaulted PTEs */
282		if (unlikely((ret & VM_FAULT_ERROR))) {
283			if (i == 0)
284				goto out_io_unlock;
285			else
286				break;
287		}
288
289		address += PAGE_SIZE;
290		if (unlikely(++page_offset >= page_last))
291			break;
292	}
293	ret = VM_FAULT_NOPAGE;
294out_io_unlock:
295	ttm_mem_io_unlock(man);
296out_unlock:
297	dma_resv_unlock(bo->base.resv);
298	return ret;
299}
300
301static void ttm_bo_vm_open(struct vm_area_struct *vma)
302{
303	struct ttm_buffer_object *bo =
304	    (struct ttm_buffer_object *)vma->vm_private_data;
305
306	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
307
308	ttm_bo_get(bo);
309}
310
311static void ttm_bo_vm_close(struct vm_area_struct *vma)
312{
313	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
314
315	ttm_bo_put(bo);
316	vma->vm_private_data = NULL;
317}
318
319static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
320				 unsigned long offset,
321				 uint8_t *buf, int len, int write)
322{
323	unsigned long page = offset >> PAGE_SHIFT;
324	unsigned long bytes_left = len;
325	int ret;
326
327	/* Copy a page at a time, that way no extra virtual address
328	 * mapping is needed
329	 */
330	offset -= page << PAGE_SHIFT;
331	do {
332		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
333		struct ttm_bo_kmap_obj map;
334		void *ptr;
335		bool is_iomem;
336
337		ret = ttm_bo_kmap(bo, page, 1, &map);
338		if (ret)
339			return ret;
340
341		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
342		WARN_ON_ONCE(is_iomem);
343		if (write)
344			memcpy(ptr, buf, bytes);
345		else
346			memcpy(buf, ptr, bytes);
347		ttm_bo_kunmap(&map);
348
349		page++;
350		buf += bytes;
351		bytes_left -= bytes;
352		offset = 0;
353	} while (bytes_left);
354
355	return len;
356}
357
358static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
359			    void *buf, int len, int write)
360{
361	unsigned long offset = (addr) - vma->vm_start;
362	struct ttm_buffer_object *bo = vma->vm_private_data;
363	int ret;
364
365	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
366		return -EIO;
367
368	ret = ttm_bo_reserve(bo, true, false, NULL);
369	if (ret)
370		return ret;
371
372	switch (bo->mem.mem_type) {
373	case TTM_PL_SYSTEM:
374		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
375			ret = ttm_tt_swapin(bo->ttm);
376			if (unlikely(ret != 0))
377				return ret;
378		}
379		/* fall through */
380	case TTM_PL_TT:
381		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
382		break;
383	default:
384		if (bo->bdev->driver->access_memory)
385			ret = bo->bdev->driver->access_memory(
386				bo, offset, buf, len, write);
387		else
388			ret = -EIO;
389	}
390
391	ttm_bo_unreserve(bo);
392
393	return ret;
394}
395
396static const struct vm_operations_struct ttm_bo_vm_ops = {
397	.fault = ttm_bo_vm_fault,
398	.open = ttm_bo_vm_open,
399	.close = ttm_bo_vm_close,
400	.access = ttm_bo_vm_access
401};
402
403static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
404						  unsigned long offset,
405						  unsigned long pages)
406{
407	struct drm_vma_offset_node *node;
408	struct ttm_buffer_object *bo = NULL;
409
410	drm_vma_offset_lock_lookup(&bdev->vma_manager);
411
412	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
413	if (likely(node)) {
414		bo = container_of(node, struct ttm_buffer_object,
415				  base.vma_node);
416		bo = ttm_bo_get_unless_zero(bo);
417	}
418
419	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
420
421	if (!bo)
422		pr_err("Could not find buffer object to map\n");
423
424	return bo;
425}
426
427int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
428		struct ttm_bo_device *bdev)
429{
430	struct ttm_bo_driver *driver;
431	struct ttm_buffer_object *bo;
432	int ret;
433
434	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
435		return -EINVAL;
436
437	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
438	if (unlikely(!bo))
439		return -EINVAL;
440
441	driver = bo->bdev->driver;
442	if (unlikely(!driver->verify_access)) {
443		ret = -EPERM;
444		goto out_unref;
445	}
446	ret = driver->verify_access(bo, filp);
447	if (unlikely(ret != 0))
448		goto out_unref;
449
450	vma->vm_ops = &ttm_bo_vm_ops;
451
452	/*
453	 * Note: We're transferring the bo reference to
454	 * vma->vm_private_data here.
455	 */
456
457	vma->vm_private_data = bo;
458
459	/*
460	 * We'd like to use VM_PFNMAP on shared mappings, where
461	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
462	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
463	 * bad for performance. Until that has been sorted out, use
464	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
465	 */
466	vma->vm_flags |= VM_MIXEDMAP;
467	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
468	return 0;
469out_unref:
470	ttm_bo_put(bo);
471	return ret;
472}
473EXPORT_SYMBOL(ttm_bo_mmap);
474
475int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
476{
477	if (vma->vm_pgoff != 0)
478		return -EACCES;
479
480	ttm_bo_get(bo);
481
482	vma->vm_ops = &ttm_bo_vm_ops;
483	vma->vm_private_data = bo;
484	vma->vm_flags |= VM_MIXEDMAP;
485	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
486	return 0;
487}
488EXPORT_SYMBOL(ttm_fbdev_mmap);
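
ttm_fbdev_mmap() serves the same purpose for a TTM-backed fbdev framebuffer, which is always mapped at offset zero. A minimal sketch of an fb_ops.fb_mmap handler is shown below, assuming a hypothetical mydrv_fbdev structure that holds the framebuffer bo; neither name comes from this file.

/* Illustrative fbdev glue only; mydrv_fbdev and its bo member are assumptions. */
static int mydrv_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct mydrv_fbdev *fbdev = info->par;

	/* Any non-zero vm_pgoff is rejected by ttm_fbdev_mmap() with -EACCES;
	 * on success the vma carries its own reference to the bo. */
	return ttm_fbdev_mmap(vma, fbdev->bo);
}
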