v4.6
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_internal.h"
#include "drm_legacy.h"

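/*
 * One entry per mapping of a legacy map into a process address space;
 * drm_device::vmalist collects these for the vma debug listing below and
 * for teardown in drm_legacy_vma_flush().
 */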
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif
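/*
 * Worked example for the lookup above (hypothetical numbers, assuming
 * 4 KiB pages): with map->offset = 0xd0000000 and a fault at
 * vma->vm_start + 0x3000, baddr is 0xd0003000.  If agpmem->bound is
 * 0xd0000000, the page index is (baddr - agpmem->bound) >> PAGE_SHIFT = 3,
 * so agpmem->memory->pages[3] is handed back to the fault handler.
 */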

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
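/*
 * vmalloc_to_page() is valid here because _DRM_SHM maps are vmalloc()ed
 * buffers; note the matching vfree(map->handle) in drm_vm_shm_close()
 * below.
 */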

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
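/*
 * Map type to vm_ops pairing, as chosen in drm_mmap_locked() below:
 * _DRM_AGP, _DRM_FRAME_BUFFER and _DRM_REGISTERS use drm_vm_ops;
 * _DRM_CONSISTENT and _DRM_SHM use drm_vm_shm_ops; DMA buffer mappings
 * (pgoff == 0) use drm_vm_dma_ops; _DRM_SCATTER_GATHER uses drm_vm_sg_ops.
 */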

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct drm_device *dev,
		struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct drm_device *dev,
		struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
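/*
 * Example of the length check above (hypothetical numbers, assuming 4 KiB
 * pages): with dma->page_count == 32, only an mmap() of exactly
 * 32 << PAGE_SHIFT == 128 KiB succeeds; anything else returns -EINVAL.
 */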

static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/**
 * mmap DRM memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::map_hash, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type and the pages are
 * remapped. Finally the vma is registered via drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms the CPU can't access the AGP
			 * aperture's bus addresses directly, so for memory of
			 * type _DRM_AGP we sort out the real physical pages
			 * and mappings in fault().
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
	/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
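/*
 * Hypothetical userspace sketch (not kernel code): a legacy client reaches
 * drm_legacy_mmap() by mmap()ing the device node at the offset that names a
 * map; map_offset and map_size are assumed to come from the legacy map
 * ioctls.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, map_offset);
 */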

void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}

int drm_vma_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_vma_entry *pt;
	struct vm_area_struct *vma;
	unsigned long vma_count = 0;
#if defined(__i386__)
	unsigned int pgprot;
#endif

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(pt, &dev->vmalist, head)
		vma_count++;

	seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
		   vma_count, high_memory,
		   (void *)(unsigned long)virt_to_phys(high_memory));

	list_for_each_entry(pt, &dev->vmalist, head) {
		vma = pt->vma;
		if (!vma)
			continue;
		seq_printf(m,
			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
			   pt->pid,
			   (void *)vma->vm_start, (void *)vma->vm_end,
			   vma->vm_flags & VM_READ ? 'r' : '-',
			   vma->vm_flags & VM_WRITE ? 'w' : '-',
			   vma->vm_flags & VM_EXEC ? 'x' : '-',
			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
			   vma->vm_flags & VM_IO ? 'i' : '-',
			   vma->vm_pgoff);

#if defined(__i386__)
		pgprot = pgprot_val(vma->vm_page_prot);
		seq_printf(m, " %c%c%c%c%c%c%c%c%c",
			   pgprot & _PAGE_PRESENT ? 'p' : '-',
			   pgprot & _PAGE_RW ? 'w' : 'r',
			   pgprot & _PAGE_USER ? 'u' : 's',
			   pgprot & _PAGE_PWT ? 't' : 'b',
			   pgprot & _PAGE_PCD ? 'u' : 'c',
			   pgprot & _PAGE_ACCESSED ? 'a' : '-',
			   pgprot & _PAGE_DIRTY ? 'd' : '-',
			   pgprot & _PAGE_PSE ? 'm' : 'k',
			   pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
		seq_printf(m, "\n");
	}
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
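/*
 * A per-vma line above might render as (hypothetical values; %pK is subject
 * to kptr_restrict and may be censored to zeros):
 *
 *	 1234 0x00007f2a40000000-0x00007f2a40100000 rw-s-i 0x000e0000000
 *
 * i.e. pid, start-end, then the r/w/x, shared/private, locked and I/O
 * flags, then the page offset in bytes.
 */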
v3.15
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map->type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
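/*
 * Note: this version still special-cases powerpc here, forcing
 * _PAGE_NO_CACHE on all I/O maps and additionally _PAGE_GUARDED on register
 * maps; the v4.6 listing above instead folds powerpc into the
 * write-combining branch.
 */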

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct drm_device *dev,
		struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}
EXPORT_SYMBOL_GPL(drm_vm_open_locked);

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct drm_device *dev,
		struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/**
 * mmap DRM memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::map_hash, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type and the pages are
 * remapped. Finally the vma is registered via drm_vm_open_locked().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms the CPU can't access the AGP
			 * aperture's bus addresses directly, so for memory of
			 * type _DRM_AGP we sort out the real physical pages
			 * and mappings in fault().
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
	/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);