v3.1: drivers/gpu/drm/ttm/ttm_bo_vm.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16

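/*
 * Walk the address-space rb-tree for the buffer object whose virtual
 * range contains [page_start, page_start + num_pages). Both callers
 * in this file hold bdev->vm_lock for reading around this lookup.
 */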
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

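/*
 * Fault handler for TTM-backed mappings: reserve the object without
 * blocking, wait out any pipelined move, reserve the I/O space, fix up
 * the caching bits in vm_page_prot, then prefault a batch of PTEs.
 */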
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
			/* fall through */
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

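	/*
	 * page_offset is the index of the faulting page within the
	 * buffer object, derived from the object's position in the
	 * device address space (bo->vm_node->start) and the offset the
	 * vma was mapped at (vm_pgoff); page_last bounds the prefault
	 * loop to pages actually covered by this vma.
	 */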
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

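	/*
	 * Clamp the transfer to what remains of the object past the
	 * file position, then compute the page span that must be
	 * kmapped to cover the byte range being copied.
	 */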
	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
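For context, a driver exposes these handlers by forwarding its own file operations to TTM; in-tree users such as radeon and vmwgfx follow this shape. The sketch below is illustrative only: mydrv_device and the private_data wiring are hypothetical stand-ins, not part of this file, but the ttm_bo_mmap() call matches the API above.

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>

/* Hypothetical device wrapper; only the embedded ttm_bo_device matters here. */
struct mydrv_device {
	struct ttm_bo_device bdev;
};

/* Hand the mmap request to TTM: ttm_bo_mmap() resolves vma->vm_pgoff to a
 * buffer object, checks the driver's verify_access() callback, and installs
 * ttm_bo_vm_ops on the vma. */
static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mydrv_device *mdev = filp->private_data;	/* assumed set in open() */

	return ttm_bo_mmap(filp, vma, &mdev->bdev);
}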
v3.5.6: drivers/gpu/drm/ttm/ttm_bo_vm.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16

static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
			/* fall through */
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

		/* Allocate all pages at once, the most common usage. */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		pr_err("Could not find buffer object to map\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
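To close the loop, here is a hedged sketch of the pieces a driver would supply around ttm_bo_io(): mydrv_device, the private_data wiring, and mydrv_verify_access() are hypothetical, and the access policy shown is deliberately trivial. Both ttm_bo_mmap() and ttm_bo_io() refuse to proceed unless the driver provides a verify_access() callback, so even a permissive stub like this one is mandatory.

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>

/* Hypothetical device wrapper, as in the earlier mmap sketch. */
struct mydrv_device {
	struct ttm_bo_device bdev;
};

/* verify_access() callback assumed by both lookup paths above; this
 * trivial policy lets any opener of the device file through. */
static int mydrv_verify_access(struct ttm_buffer_object *bo,
			       struct file *filp)
{
	return 0;
}

/* Read handler built on ttm_bo_io(): *f_pos selects the buffer object in
 * the device address space, and write=false requests a copy_to_user(). */
static ssize_t mydrv_read(struct file *filp, char __user *buf,
			  size_t count, loff_t *f_pos)
{
	struct mydrv_device *mdev = filp->private_data;	/* assumed set in open() */

	return ttm_bo_io(&mdev->bdev, filp, NULL, buf, count, f_pos, false);
}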