drm_vm.c (kernel v6.2)
  1/*
  2 * \file drm_vm.c
  3 * Memory mapping for DRM
  4 *
  5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
  6 * \author Gareth Hughes <gareth@valinux.com>
  7 */
  8
  9/*
 10 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 11 *
 12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 14 * All Rights Reserved.
 15 *
 16 * Permission is hereby granted, free of charge, to any person obtaining a
 17 * copy of this software and associated documentation files (the "Software"),
 18 * to deal in the Software without restriction, including without limitation
 19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 20 * and/or sell copies of the Software, and to permit persons to whom the
 21 * Software is furnished to do so, subject to the following conditions:
 22 *
 23 * The above copyright notice and this permission notice (including the next
 24 * paragraph) shall be included in all copies or substantial portions of the
 25 * Software.
 26 *
 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 33 * OTHER DEALINGS IN THE SOFTWARE.
 34 */
 35
 36#include <linux/export.h>
 37#include <linux/pci.h>
 38#include <linux/seq_file.h>
 39#include <linux/vmalloc.h>
 40#include <linux/pgtable.h>
 41
 42#if defined(__ia64__)
 43#include <linux/efi.h>
 44#include <linux/slab.h>
 45#endif
 46#include <linux/mem_encrypt.h>
 47
 48#include <drm/drm_device.h>
 49#include <drm/drm_drv.h>
 50#include <drm/drm_file.h>
 51#include <drm/drm_framebuffer.h>
 52#include <drm/drm_print.h>
 53
 54#include "drm_internal.h"
 55#include "drm_legacy.h"
 56
 57struct drm_vma_entry {
 58	struct list_head head;
 59	struct vm_area_struct *vma;
 60	pid_t pid;
 61};
 62
 63static void drm_vm_open(struct vm_area_struct *vma);
 64static void drm_vm_close(struct vm_area_struct *vma);
 65
 66static pgprot_t drm_io_prot(struct drm_local_map *map,
 67			    struct vm_area_struct *vma)
 68{
 69	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 70
 71#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
 72    defined(__mips__) || defined(__loongarch__)
 73	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
 74		tmp = pgprot_noncached(tmp);
 75	else
 76		tmp = pgprot_writecombine(tmp);
 77#elif defined(__ia64__)
 78	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 79				    vma->vm_start))
 80		tmp = pgprot_writecombine(tmp);
 81	else
 82		tmp = pgprot_noncached(tmp);
 83#elif defined(__sparc__) || defined(__arm__)
 84	tmp = pgprot_noncached(tmp);
 85#endif
 86	return tmp;
 87}
 88
 89static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
 90{
 91	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 92
 93#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
 94	tmp = pgprot_noncached_wc(tmp);
 95#endif
 96	return tmp;
 97}
 98
 99/*
100 * \c fault method for AGP virtual memory.
101 *
102 * \param vma virtual memory area.
103 * \param address access address.
104 * \return pointer to the page structure.
105 *
106 * Find the right map and if it's AGP memory find the real physical page to
107 * map, get the page, increment the use count and return it.
108 */
109#if IS_ENABLED(CONFIG_AGP)
110static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
111{
112	struct vm_area_struct *vma = vmf->vma;
113	struct drm_file *priv = vma->vm_file->private_data;
114	struct drm_device *dev = priv->minor->dev;
115	struct drm_local_map *map = NULL;
116	struct drm_map_list *r_list;
117	struct drm_hash_item *hash;
118
119	/*
120	 * Find the right map
121	 */
122	if (!dev->agp)
123		goto vm_fault_error;
124
125	if (!dev->agp || !dev->agp->cant_use_aperture)
126		goto vm_fault_error;
127
128	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
129		goto vm_fault_error;
130
131	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
132	map = r_list->map;
133
134	if (map && map->type == _DRM_AGP) {
135		/*
136		 * Using vm_pgoff as a selector forces us to use this unusual
137		 * addressing scheme.
138		 */
139		resource_size_t offset = vmf->address - vma->vm_start;
140		resource_size_t baddr = map->offset + offset;
141		struct drm_agp_mem *agpmem;
142		struct page *page;
143
144#ifdef __alpha__
145		/*
146		 * Adjust to a bus-relative address
147		 */
148		baddr -= dev->hose->mem_space->start;
149#endif
150
151		/*
152		 * It's AGP memory - find the real physical page to map
153		 */
154		list_for_each_entry(agpmem, &dev->agp->memory, head) {
155			if (agpmem->bound <= baddr &&
156			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
157				break;
158		}
159
160		if (&agpmem->head == &dev->agp->memory)
161			goto vm_fault_error;
162
163		/*
164		 * Get the page, inc the use count, and return it
165		 */
166		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
167		page = agpmem->memory->pages[offset];
168		get_page(page);
169		vmf->page = page;
170
171		DRM_DEBUG
172		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
173		     (unsigned long long)baddr,
174		     agpmem->memory->pages[offset],
175		     (unsigned long long)offset,
176		     page_count(page));
177		return 0;
178	}
179vm_fault_error:
180	return VM_FAULT_SIGBUS;	/* Disallow mremap */
181}
182#else
183static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
184{
185	return VM_FAULT_SIGBUS;
186}
187#endif
188
189/*
190 * \c nopage method for shared virtual memory.
191 *
192 * \param vma virtual memory area.
193 * \param address access address.
194 * \return pointer to the page structure.
195 *
196 * Get the mapping, find the real physical page to map, get the page, and
197 * return it.
198 */
199static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
200{
201	struct vm_area_struct *vma = vmf->vma;
202	struct drm_local_map *map = vma->vm_private_data;
203	unsigned long offset;
204	unsigned long i;
205	struct page *page;
206
207	if (!map)
208		return VM_FAULT_SIGBUS;	/* Nothing allocated */
209
210	offset = vmf->address - vma->vm_start;
211	i = (unsigned long)map->handle + offset;
212	page = vmalloc_to_page((void *)i);
213	if (!page)
214		return VM_FAULT_SIGBUS;
215	get_page(page);
216	vmf->page = page;
217
218	DRM_DEBUG("shm_fault 0x%lx\n", offset);
219	return 0;
220}
221
222/*
223 * \c close method for shared virtual memory.
224 *
225 * \param vma virtual memory area.
226 *
227 * Deletes map information if we are the last
228 * person to close a mapping and it's not in the global maplist.
229 */
230static void drm_vm_shm_close(struct vm_area_struct *vma)
231{
232	struct drm_file *priv = vma->vm_file->private_data;
233	struct drm_device *dev = priv->minor->dev;
234	struct drm_vma_entry *pt, *temp;
235	struct drm_local_map *map;
236	struct drm_map_list *r_list;
237	int found_maps = 0;
238
239	DRM_DEBUG("0x%08lx,0x%08lx\n",
240		  vma->vm_start, vma->vm_end - vma->vm_start);
241
242	map = vma->vm_private_data;
243
244	mutex_lock(&dev->struct_mutex);
245	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
246		if (pt->vma->vm_private_data == map)
247			found_maps++;
248		if (pt->vma == vma) {
249			list_del(&pt->head);
250			kfree(pt);
251		}
252	}
253
254	/* We were the only map that was found */
255	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
256		/* Check to see if we are in the maplist, if we are not, then
257		 * we delete this mappings information.
258		 */
259		found_maps = 0;
260		list_for_each_entry(r_list, &dev->maplist, head) {
261			if (r_list->map == map)
262				found_maps++;
263		}
264
265		if (!found_maps) {
266			switch (map->type) {
267			case _DRM_REGISTERS:
268			case _DRM_FRAME_BUFFER:
269				arch_phys_wc_del(map->mtrr);
270				iounmap(map->handle);
271				break;
272			case _DRM_SHM:
273				vfree(map->handle);
274				break;
275			case _DRM_AGP:
276			case _DRM_SCATTER_GATHER:
277				break;
278			case _DRM_CONSISTENT:
279				dma_free_coherent(dev->dev,
280						  map->size,
281						  map->handle,
282						  map->offset);
283				break;
284			}
285			kfree(map);
286		}
287	}
288	mutex_unlock(&dev->struct_mutex);
289}
290
291/*
292 * \c fault method for DMA virtual memory.
293 *
294 * \param address access address.
295 * \return pointer to the page structure.
296 *
297 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
298 */
299static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
300{
301	struct vm_area_struct *vma = vmf->vma;
302	struct drm_file *priv = vma->vm_file->private_data;
303	struct drm_device *dev = priv->minor->dev;
304	struct drm_device_dma *dma = dev->dma;
305	unsigned long offset;
306	unsigned long page_nr;
307	struct page *page;
308
309	if (!dma)
310		return VM_FAULT_SIGBUS;	/* Error */
311	if (!dma->pagelist)
312		return VM_FAULT_SIGBUS;	/* Nothing allocated */
313
314	offset = vmf->address - vma->vm_start;
315					/* vm_[pg]off[set] should be 0 */
316	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
317	page = virt_to_page((void *)dma->pagelist[page_nr]);
318
319	get_page(page);
320	vmf->page = page;
321
322	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
323	return 0;
324}
325
326/*
327 * \c fault method for scatter-gather virtual memory.
328 *
329 * \param address access address.
330 * \return pointer to the page structure.
331 *
332 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
333 */
334static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
335{
336	struct vm_area_struct *vma = vmf->vma;
337	struct drm_local_map *map = vma->vm_private_data;
338	struct drm_file *priv = vma->vm_file->private_data;
339	struct drm_device *dev = priv->minor->dev;
340	struct drm_sg_mem *entry = dev->sg;
341	unsigned long offset;
342	unsigned long map_offset;
343	unsigned long page_offset;
344	struct page *page;
345
346	if (!entry)
347		return VM_FAULT_SIGBUS;	/* Error */
348	if (!entry->pagelist)
349		return VM_FAULT_SIGBUS;	/* Nothing allocated */
350
351	offset = vmf->address - vma->vm_start;
352	map_offset = map->offset - (unsigned long)dev->sg->virtual;
353	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
354	page = entry->pagelist[page_offset];
355	get_page(page);
356	vmf->page = page;
357
358	return 0;
359}
360
361/** AGP virtual memory operations */
362static const struct vm_operations_struct drm_vm_ops = {
363	.fault = drm_vm_fault,
364	.open = drm_vm_open,
365	.close = drm_vm_close,
366};
367
368/** Shared virtual memory operations */
369static const struct vm_operations_struct drm_vm_shm_ops = {
370	.fault = drm_vm_shm_fault,
371	.open = drm_vm_open,
372	.close = drm_vm_shm_close,
373};
374
375/** DMA virtual memory operations */
376static const struct vm_operations_struct drm_vm_dma_ops = {
377	.fault = drm_vm_dma_fault,
378	.open = drm_vm_open,
379	.close = drm_vm_close,
380};
381
382/** Scatter-gather virtual memory operations */
383static const struct vm_operations_struct drm_vm_sg_ops = {
384	.fault = drm_vm_sg_fault,
385	.open = drm_vm_open,
386	.close = drm_vm_close,
387};
388
389static void drm_vm_open_locked(struct drm_device *dev,
390			       struct vm_area_struct *vma)
391{
392	struct drm_vma_entry *vma_entry;
393
394	DRM_DEBUG("0x%08lx,0x%08lx\n",
395		  vma->vm_start, vma->vm_end - vma->vm_start);
396
397	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
398	if (vma_entry) {
399		vma_entry->vma = vma;
400		vma_entry->pid = current->pid;
401		list_add(&vma_entry->head, &dev->vmalist);
402	}
403}
404
405static void drm_vm_open(struct vm_area_struct *vma)
406{
407	struct drm_file *priv = vma->vm_file->private_data;
408	struct drm_device *dev = priv->minor->dev;
409
410	mutex_lock(&dev->struct_mutex);
411	drm_vm_open_locked(dev, vma);
412	mutex_unlock(&dev->struct_mutex);
413}
414
415static void drm_vm_close_locked(struct drm_device *dev,
416				struct vm_area_struct *vma)
417{
418	struct drm_vma_entry *pt, *temp;
419
420	DRM_DEBUG("0x%08lx,0x%08lx\n",
421		  vma->vm_start, vma->vm_end - vma->vm_start);
422
423	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
424		if (pt->vma == vma) {
425			list_del(&pt->head);
426			kfree(pt);
427			break;
428		}
429	}
430}
431
432/*
433 * \c close method for all virtual memory types.
434 *
435 * \param vma virtual memory area.
436 *
437 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
438 * free it.
439 */
440static void drm_vm_close(struct vm_area_struct *vma)
441{
442	struct drm_file *priv = vma->vm_file->private_data;
443	struct drm_device *dev = priv->minor->dev;
444
445	mutex_lock(&dev->struct_mutex);
446	drm_vm_close_locked(dev, vma);
447	mutex_unlock(&dev->struct_mutex);
448}
449
450/*
451 * mmap DMA memory.
452 *
453 * \param file_priv DRM file private.
454 * \param vma virtual memory area.
455 * \return zero on success or a negative number on failure.
456 *
457 * Sets the virtual memory area operations structure to vm_dma_ops, the file
458 * pointer, and calls vm_open().
459 */
460static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
461{
462	struct drm_file *priv = filp->private_data;
463	struct drm_device *dev;
464	struct drm_device_dma *dma;
465	unsigned long length = vma->vm_end - vma->vm_start;
466
467	dev = priv->minor->dev;
468	dma = dev->dma;
469	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
470		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
471
472	/* Length must match exact page count */
473	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
474		return -EINVAL;
475	}
476
477	if (!capable(CAP_SYS_ADMIN) &&
478	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
479		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
480#if defined(__i386__) || defined(__x86_64__)
481		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
482#else
483		/* Ye gads this is ugly.  With more thought
484		   we could move this up higher and use
485		   `protection_map' instead.  */
486		vma->vm_page_prot =
487		    __pgprot(pte_val
488			     (pte_wrprotect
489			      (__pte(pgprot_val(vma->vm_page_prot)))));
490#endif
491	}
492
493	vma->vm_ops = &drm_vm_dma_ops;
494
495	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
496
497	drm_vm_open_locked(dev, vma);
498	return 0;
499}
500
501static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
502{
503#ifdef __alpha__
504	return dev->hose->dense_mem_base;
505#else
506	return 0;
507#endif
508}
509
510/*
511 * mmap DMA memory.
512 *
513 * \param file_priv DRM file private.
514 * \param vma virtual memory area.
515 * \return zero on success or a negative number on failure.
516 *
517 * If the virtual memory area has no offset associated with it then it's a DMA
518 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
519 * checks that the restricted flag is not set, sets the virtual memory operations
520 * according to the mapping type and remaps the pages. Finally sets the file
521 * pointer and calls vm_open().
522 */
523static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
524{
525	struct drm_file *priv = filp->private_data;
526	struct drm_device *dev = priv->minor->dev;
527	struct drm_local_map *map = NULL;
528	resource_size_t offset = 0;
529	struct drm_hash_item *hash;
530
531	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
532		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
533
534	if (!priv->authenticated)
535		return -EACCES;
536
537	/* We check for "dma". On Apple's UniNorth, it's valid to have
538	 * the AGP mapped at physical address 0
539	 * --BenH.
540	 */
541	if (!vma->vm_pgoff
542#if IS_ENABLED(CONFIG_AGP)
543	    && (!dev->agp
544		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
545#endif
546	    )
547		return drm_mmap_dma(filp, vma);
548
549	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
550		DRM_ERROR("Could not find map\n");
551		return -EINVAL;
552	}
553
554	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
555	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
556		return -EPERM;
557
558	/* Check for valid size. */
559	if (map->size < vma->vm_end - vma->vm_start)
560		return -EINVAL;
561
562	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
563		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
564#if defined(__i386__) || defined(__x86_64__)
565		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
566#else
567		/* Ye gads this is ugly.  With more thought
568		   we could move this up higher and use
569		   `protection_map' instead.  */
570		vma->vm_page_prot =
571		    __pgprot(pte_val
572			     (pte_wrprotect
573			      (__pte(pgprot_val(vma->vm_page_prot)))));
574#endif
575	}
576
577	switch (map->type) {
578#if !defined(__arm__)
579	case _DRM_AGP:
580		if (dev->agp && dev->agp->cant_use_aperture) {
581			/*
582			 * On some platforms we can't talk to bus dma address from the CPU, so for
583			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
584			 * pages and mappings in fault()
585			 */
586#if defined(__powerpc__)
587			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
588#endif
589			vma->vm_ops = &drm_vm_ops;
590			break;
591		}
592		fallthrough;	/* to _DRM_FRAME_BUFFER... */
593#endif
594	case _DRM_FRAME_BUFFER:
595	case _DRM_REGISTERS:
596		offset = drm_core_get_reg_ofs(dev);
597		vma->vm_page_prot = drm_io_prot(map, vma);
598		if (io_remap_pfn_range(vma, vma->vm_start,
599				       (map->offset + offset) >> PAGE_SHIFT,
600				       vma->vm_end - vma->vm_start,
601				       vma->vm_page_prot))
602			return -EAGAIN;
603		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
604			  " offset = 0x%llx\n",
605			  map->type,
606			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
607
608		vma->vm_ops = &drm_vm_ops;
609		break;
610	case _DRM_CONSISTENT:
611		/* Consistent memory is really like shared memory. But
612		 * it's allocated in a different way, so avoid fault */
613		if (remap_pfn_range(vma, vma->vm_start,
614		    page_to_pfn(virt_to_page(map->handle)),
615		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
616			return -EAGAIN;
617		vma->vm_page_prot = drm_dma_prot(map->type, vma);
618		fallthrough;	/* to _DRM_SHM */
619	case _DRM_SHM:
620		vma->vm_ops = &drm_vm_shm_ops;
621		vma->vm_private_data = (void *)map;
622		break;
623	case _DRM_SCATTER_GATHER:
624		vma->vm_ops = &drm_vm_sg_ops;
625		vma->vm_private_data = (void *)map;
626		vma->vm_page_prot = drm_dma_prot(map->type, vma);
627		break;
628	default:
629		return -EINVAL;	/* This should never happen. */
630	}
631	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
632
633	drm_vm_open_locked(dev, vma);
634	return 0;
635}
636
637int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
638{
639	struct drm_file *priv = filp->private_data;
640	struct drm_device *dev = priv->minor->dev;
641	int ret;
642
643	if (drm_dev_is_unplugged(dev))
644		return -ENODEV;
645
646	mutex_lock(&dev->struct_mutex);
647	ret = drm_mmap_locked(filp, vma);
648	mutex_unlock(&dev->struct_mutex);
649
650	return ret;
651}
652EXPORT_SYMBOL(drm_legacy_mmap);
653
654#if IS_ENABLED(CONFIG_DRM_LEGACY)
655void drm_legacy_vma_flush(struct drm_device *dev)
656{
657	struct drm_vma_entry *vma, *vma_temp;
658
659	/* Clear vma list (only needed for legacy drivers) */
660	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
661		list_del(&vma->head);
662		kfree(vma);
663	}
664}
665#endif
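Taken together, drm_legacy_mmap() and the fault handlers above are driven entirely from user space: a client of a legacy (non-KMS) driver looks a map up, mmap()s the map's user token on the DRM file descriptor, and the handlers in this file then resolve the resulting page faults. The sketch below shows that flow in outline only; the device node path, the map index, and the stripped-down error handling are illustrative assumptions, and it presumes an authenticated client of a driver that still exposes the legacy map ioctls.

/*
 * Illustrative user-space sketch only: node path and map index are
 * assumptions; error handling is reduced to the bare minimum.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>		/* struct drm_map, DRM_IOCTL_GET_MAP */

int main(void)
{
	struct drm_map map = { .offset = 0 };	/* GET_MAP takes the map index in .offset */
	void *ptr;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);	/* hypothetical device node */
	if (fd < 0)
		return 1;

	if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
		return 1;

	/*
	 * map.handle is the user token to pass as the mmap offset;
	 * drm_mmap_locked() looks it up again via vma->vm_pgoff.
	 */
	ptr = mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, (off_t)(unsigned long)map.handle);
	if (ptr == MAP_FAILED)
		return 1;

	/* Accesses through ptr are serviced by the fault/remap paths in drm_vm.c. */
	munmap(ptr, map.size);
	close(fd);
	return 0;
}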
drm_vm.c (kernel v3.5.6)
  1/**
  2 * \file drm_vm.c
  3 * Memory mapping for DRM
  4 *
  5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
  6 * \author Gareth Hughes <gareth@valinux.com>
  7 */
  8
  9/*
 10 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 11 *
 12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 14 * All Rights Reserved.
 15 *
 16 * Permission is hereby granted, free of charge, to any person obtaining a
 17 * copy of this software and associated documentation files (the "Software"),
 18 * to deal in the Software without restriction, including without limitation
 19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 20 * and/or sell copies of the Software, and to permit persons to whom the
 21 * Software is furnished to do so, subject to the following conditions:
 22 *
 23 * The above copyright notice and this permission notice (including the next
 24 * paragraph) shall be included in all copies or substantial portions of the
 25 * Software.
 26 *
 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 33 * OTHER DEALINGS IN THE SOFTWARE.
 34 */
 35
 36#include "drmP.h"
 37#include <linux/export.h>
 38#if defined(__ia64__)
 39#include <linux/efi.h>
 40#include <linux/slab.h>
 41#endif
 42
 43static void drm_vm_open(struct vm_area_struct *vma);
 44static void drm_vm_close(struct vm_area_struct *vma);
 45
 46static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
 47{
 48	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 49
 50#if defined(__i386__) || defined(__x86_64__)
 51	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
 52		pgprot_val(tmp) |= _PAGE_PCD;
 53		pgprot_val(tmp) &= ~_PAGE_PWT;
 54	}
 55#elif defined(__powerpc__)
 56	pgprot_val(tmp) |= _PAGE_NO_CACHE;
 57	if (map_type == _DRM_REGISTERS)
 58		pgprot_val(tmp) |= _PAGE_GUARDED;
 59#elif defined(__ia64__)
 60	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 61				    vma->vm_start))
 62		tmp = pgprot_writecombine(tmp);
 63	else
 64		tmp = pgprot_noncached(tmp);
 65#elif defined(__sparc__) || defined(__arm__)
 66	tmp = pgprot_noncached(tmp);
 67#endif
 68	return tmp;
 69}
 70
 71static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
 72{
 73	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 74
 75#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
 76	tmp |= _PAGE_NO_CACHE;
 77#endif
 78	return tmp;
 79}
 80
 81/**
 82 * \c fault method for AGP virtual memory.
 83 *
 84 * \param vma virtual memory area.
 85 * \param address access address.
 86 * \return pointer to the page structure.
 87 *
 88 * Find the right map and if it's AGP memory find the real physical page to
 89 * map, get the page, increment the use count and return it.
 90 */
 91#if __OS_HAS_AGP
 92static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 93{
 94	struct drm_file *priv = vma->vm_file->private_data;
 95	struct drm_device *dev = priv->minor->dev;
 96	struct drm_local_map *map = NULL;
 97	struct drm_map_list *r_list;
 98	struct drm_hash_item *hash;
 99
100	/*
101	 * Find the right map
102	 */
103	if (!drm_core_has_AGP(dev))
104		goto vm_fault_error;
105
106	if (!dev->agp || !dev->agp->cant_use_aperture)
107		goto vm_fault_error;
108
109	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
110		goto vm_fault_error;
111
112	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
113	map = r_list->map;
114
115	if (map && map->type == _DRM_AGP) {
116		/*
117		 * Using vm_pgoff as a selector forces us to use this unusual
118		 * addressing scheme.
119		 */
120		resource_size_t offset = (unsigned long)vmf->virtual_address -
121			vma->vm_start;
122		resource_size_t baddr = map->offset + offset;
123		struct drm_agp_mem *agpmem;
124		struct page *page;
125
126#ifdef __alpha__
127		/*
128		 * Adjust to a bus-relative address
129		 */
130		baddr -= dev->hose->mem_space->start;
131#endif
132
133		/*
134		 * It's AGP memory - find the real physical page to map
135		 */
136		list_for_each_entry(agpmem, &dev->agp->memory, head) {
137			if (agpmem->bound <= baddr &&
138			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
139				break;
140		}
141
142		if (&agpmem->head == &dev->agp->memory)
143			goto vm_fault_error;
144
145		/*
146		 * Get the page, inc the use count, and return it
147		 */
148		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
149		page = agpmem->memory->pages[offset];
150		get_page(page);
151		vmf->page = page;
152
153		DRM_DEBUG
154		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
155		     (unsigned long long)baddr,
156		     agpmem->memory->pages[offset],
157		     (unsigned long long)offset,
158		     page_count(page));
159		return 0;
160	}
161vm_fault_error:
162	return VM_FAULT_SIGBUS;	/* Disallow mremap */
163}
164#else				/* __OS_HAS_AGP */
165static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
166{
167	return VM_FAULT_SIGBUS;
168}
169#endif				/* __OS_HAS_AGP */
170
171/**
172 * \c nopage method for shared virtual memory.
173 *
174 * \param vma virtual memory area.
175 * \param address access address.
176 * \return pointer to the page structure.
177 *
178 * Get the mapping, find the real physical page to map, get the page, and
179 * return it.
180 */
181static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
182{
183	struct drm_local_map *map = vma->vm_private_data;
184	unsigned long offset;
185	unsigned long i;
186	struct page *page;
187
188	if (!map)
189		return VM_FAULT_SIGBUS;	/* Nothing allocated */
190
191	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
192	i = (unsigned long)map->handle + offset;
193	page = vmalloc_to_page((void *)i);
194	if (!page)
195		return VM_FAULT_SIGBUS;
196	get_page(page);
197	vmf->page = page;
198
199	DRM_DEBUG("shm_fault 0x%lx\n", offset);
200	return 0;
201}
202
203/**
204 * \c close method for shared virtual memory.
205 *
206 * \param vma virtual memory area.
207 *
208 * Deletes map information if we are the last
209 * person to close a mapping and it's not in the global maplist.
210 */
211static void drm_vm_shm_close(struct vm_area_struct *vma)
212{
213	struct drm_file *priv = vma->vm_file->private_data;
214	struct drm_device *dev = priv->minor->dev;
215	struct drm_vma_entry *pt, *temp;
216	struct drm_local_map *map;
217	struct drm_map_list *r_list;
218	int found_maps = 0;
219
220	DRM_DEBUG("0x%08lx,0x%08lx\n",
221		  vma->vm_start, vma->vm_end - vma->vm_start);
222	atomic_dec(&dev->vma_count);
223
224	map = vma->vm_private_data;
225
226	mutex_lock(&dev->struct_mutex);
227	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
228		if (pt->vma->vm_private_data == map)
229			found_maps++;
230		if (pt->vma == vma) {
231			list_del(&pt->head);
232			kfree(pt);
233		}
234	}
235
236	/* We were the only map that was found */
237	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
238		/* Check to see if we are in the maplist, if we are not, then
239		 * we delete this mappings information.
240		 */
241		found_maps = 0;
242		list_for_each_entry(r_list, &dev->maplist, head) {
243			if (r_list->map == map)
244				found_maps++;
245		}
246
247		if (!found_maps) {
248			drm_dma_handle_t dmah;
249
250			switch (map->type) {
251			case _DRM_REGISTERS:
252			case _DRM_FRAME_BUFFER:
253				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
254					int retcode;
255					retcode = mtrr_del(map->mtrr,
256							   map->offset,
257							   map->size);
258					DRM_DEBUG("mtrr_del = %d\n", retcode);
259				}
260				iounmap(map->handle);
261				break;
262			case _DRM_SHM:
263				vfree(map->handle);
264				break;
265			case _DRM_AGP:
266			case _DRM_SCATTER_GATHER:
267				break;
268			case _DRM_CONSISTENT:
269				dmah.vaddr = map->handle;
270				dmah.busaddr = map->offset;
271				dmah.size = map->size;
272				__drm_pci_free(dev, &dmah);
273				break;
274			case _DRM_GEM:
275				DRM_ERROR("tried to rmmap GEM object\n");
276				break;
277			}
278			kfree(map);
279		}
280	}
281	mutex_unlock(&dev->struct_mutex);
282}
283
284/**
285 * \c fault method for DMA virtual memory.
286 *
287 * \param vma virtual memory area.
288 * \param address access address.
289 * \return pointer to the page structure.
290 *
291 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
292 */
293static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
294{
295	struct drm_file *priv = vma->vm_file->private_data;
296	struct drm_device *dev = priv->minor->dev;
297	struct drm_device_dma *dma = dev->dma;
298	unsigned long offset;
299	unsigned long page_nr;
300	struct page *page;
301
302	if (!dma)
303		return VM_FAULT_SIGBUS;	/* Error */
304	if (!dma->pagelist)
305		return VM_FAULT_SIGBUS;	/* Nothing allocated */
306
307	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
308	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
309	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
310
311	get_page(page);
312	vmf->page = page;
313
314	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
315	return 0;
316}
317
318/**
319 * \c fault method for scatter-gather virtual memory.
320 *
321 * \param vma virtual memory area.
322 * \param address access address.
323 * \return pointer to the page structure.
324 *
325 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
326 */
327static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
328{
329	struct drm_local_map *map = vma->vm_private_data;
330	struct drm_file *priv = vma->vm_file->private_data;
331	struct drm_device *dev = priv->minor->dev;
332	struct drm_sg_mem *entry = dev->sg;
333	unsigned long offset;
334	unsigned long map_offset;
335	unsigned long page_offset;
336	struct page *page;
337
338	if (!entry)
339		return VM_FAULT_SIGBUS;	/* Error */
340	if (!entry->pagelist)
341		return VM_FAULT_SIGBUS;	/* Nothing allocated */
342
343	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
344	map_offset = map->offset - (unsigned long)dev->sg->virtual;
345	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
346	page = entry->pagelist[page_offset];
347	get_page(page);
348	vmf->page = page;
349
350	return 0;
351}
352
353static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
354{
355	return drm_do_vm_fault(vma, vmf);
356}
357
358static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
359{
360	return drm_do_vm_shm_fault(vma, vmf);
361}
362
363static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
364{
365	return drm_do_vm_dma_fault(vma, vmf);
366}
367
368static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
369{
370	return drm_do_vm_sg_fault(vma, vmf);
371}
372
373/** AGP virtual memory operations */
374static const struct vm_operations_struct drm_vm_ops = {
375	.fault = drm_vm_fault,
376	.open = drm_vm_open,
377	.close = drm_vm_close,
378};
379
380/** Shared virtual memory operations */
381static const struct vm_operations_struct drm_vm_shm_ops = {
382	.fault = drm_vm_shm_fault,
383	.open = drm_vm_open,
384	.close = drm_vm_shm_close,
385};
386
387/** DMA virtual memory operations */
388static const struct vm_operations_struct drm_vm_dma_ops = {
389	.fault = drm_vm_dma_fault,
390	.open = drm_vm_open,
391	.close = drm_vm_close,
392};
393
394/** Scatter-gather virtual memory operations */
395static const struct vm_operations_struct drm_vm_sg_ops = {
396	.fault = drm_vm_sg_fault,
397	.open = drm_vm_open,
398	.close = drm_vm_close,
399};
400
401/**
402 * \c open method for shared virtual memory.
403 *
404 * \param vma virtual memory area.
405 *
406 * Create a new drm_vma_entry structure as the \p vma private data entry and
407 * add it to drm_device::vmalist.
408 */
409void drm_vm_open_locked(struct drm_device *dev,
410		struct vm_area_struct *vma)
411{
412	struct drm_vma_entry *vma_entry;
413
414	DRM_DEBUG("0x%08lx,0x%08lx\n",
415		  vma->vm_start, vma->vm_end - vma->vm_start);
416	atomic_inc(&dev->vma_count);
417
418	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
419	if (vma_entry) {
420		vma_entry->vma = vma;
421		vma_entry->pid = current->pid;
422		list_add(&vma_entry->head, &dev->vmalist);
423	}
424}
425
426static void drm_vm_open(struct vm_area_struct *vma)
427{
428	struct drm_file *priv = vma->vm_file->private_data;
429	struct drm_device *dev = priv->minor->dev;
430
431	mutex_lock(&dev->struct_mutex);
432	drm_vm_open_locked(dev, vma);
433	mutex_unlock(&dev->struct_mutex);
434}
435
436void drm_vm_close_locked(struct drm_device *dev,
437		struct vm_area_struct *vma)
438{
439	struct drm_vma_entry *pt, *temp;
440
441	DRM_DEBUG("0x%08lx,0x%08lx\n",
442		  vma->vm_start, vma->vm_end - vma->vm_start);
443	atomic_dec(&dev->vma_count);
444
445	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
446		if (pt->vma == vma) {
447			list_del(&pt->head);
448			kfree(pt);
449			break;
450		}
451	}
452}
453
454/**
455 * \c close method for all virtual memory types.
456 *
457 * \param vma virtual memory area.
458 *
459 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
460 * free it.
461 */
462static void drm_vm_close(struct vm_area_struct *vma)
463{
464	struct drm_file *priv = vma->vm_file->private_data;
465	struct drm_device *dev = priv->minor->dev;
466
467	mutex_lock(&dev->struct_mutex);
468	drm_vm_close_locked(dev, vma);
469	mutex_unlock(&dev->struct_mutex);
470}
471
472/**
473 * mmap DMA memory.
474 *
475 * \param file_priv DRM file private.
476 * \param vma virtual memory area.
477 * \return zero on success or a negative number on failure.
478 *
479 * Sets the virtual memory area operations structure to vm_dma_ops, the file
480 * pointer, and calls vm_open().
481 */
482static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
483{
484	struct drm_file *priv = filp->private_data;
485	struct drm_device *dev;
486	struct drm_device_dma *dma;
487	unsigned long length = vma->vm_end - vma->vm_start;
488
489	dev = priv->minor->dev;
490	dma = dev->dma;
491	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
492		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
493
494	/* Length must match exact page count */
495	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
496		return -EINVAL;
497	}
498
499	if (!capable(CAP_SYS_ADMIN) &&
500	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
501		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
502#if defined(__i386__) || defined(__x86_64__)
503		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
504#else
505		/* Ye gads this is ugly.  With more thought
506		   we could move this up higher and use
507		   `protection_map' instead.  */
508		vma->vm_page_prot =
509		    __pgprot(pte_val
510			     (pte_wrprotect
511			      (__pte(pgprot_val(vma->vm_page_prot)))));
512#endif
513	}
514
515	vma->vm_ops = &drm_vm_dma_ops;
516
517	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
518	vma->vm_flags |= VM_DONTEXPAND;
519
520	drm_vm_open_locked(dev, vma);
521	return 0;
522}
523
524static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
525{
526#ifdef __alpha__
527	return dev->hose->dense_mem_base;
528#else
529	return 0;
530#endif
531}
532
533/**
534 * mmap DMA memory.
535 *
536 * \param file_priv DRM file private.
537 * \param vma virtual memory area.
538 * \return zero on success or a negative number on failure.
539 *
540 * If the virtual memory area has no offset associated with it then it's a DMA
541 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
542 * checks that the restricted flag is not set, sets the virtual memory operations
543 * according to the mapping type and remaps the pages. Finally sets the file
544 * pointer and calls vm_open().
545 */
546int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
547{
548	struct drm_file *priv = filp->private_data;
549	struct drm_device *dev = priv->minor->dev;
550	struct drm_local_map *map = NULL;
551	resource_size_t offset = 0;
552	struct drm_hash_item *hash;
553
554	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
555		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
556
557	if (!priv->authenticated)
558		return -EACCES;
559
560	/* We check for "dma". On Apple's UniNorth, it's valid to have
561	 * the AGP mapped at physical address 0
562	 * --BenH.
563	 */
564	if (!vma->vm_pgoff
565#if __OS_HAS_AGP
566	    && (!dev->agp
567		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
568#endif
569	    )
570		return drm_mmap_dma(filp, vma);
571
572	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
573		DRM_ERROR("Could not find map\n");
574		return -EINVAL;
575	}
576
577	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
578	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
579		return -EPERM;
580
581	/* Check for valid size. */
582	if (map->size < vma->vm_end - vma->vm_start)
583		return -EINVAL;
584
585	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
586		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
587#if defined(__i386__) || defined(__x86_64__)
588		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
589#else
590		/* Ye gads this is ugly.  With more thought
591		   we could move this up higher and use
592		   `protection_map' instead.  */
593		vma->vm_page_prot =
594		    __pgprot(pte_val
595			     (pte_wrprotect
596			      (__pte(pgprot_val(vma->vm_page_prot)))));
597#endif
598	}
599
600	switch (map->type) {
601#if !defined(__arm__)
602	case _DRM_AGP:
603		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
604			/*
605			 * On some platforms we can't talk to bus dma address from the CPU, so for
606			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
607			 * pages and mappings in fault()
608			 */
609#if defined(__powerpc__)
610			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
611#endif
612			vma->vm_ops = &drm_vm_ops;
613			break;
614		}
615		/* fall through to _DRM_FRAME_BUFFER... */
616#endif
617	case _DRM_FRAME_BUFFER:
618	case _DRM_REGISTERS:
619		offset = drm_core_get_reg_ofs(dev);
620		vma->vm_flags |= VM_IO;	/* not in core dump */
621		vma->vm_page_prot = drm_io_prot(map->type, vma);
622#if !defined(__arm__)
623		if (io_remap_pfn_range(vma, vma->vm_start,
624				       (map->offset + offset) >> PAGE_SHIFT,
625				       vma->vm_end - vma->vm_start,
626				       vma->vm_page_prot))
627			return -EAGAIN;
628#else
629		if (remap_pfn_range(vma, vma->vm_start,
630					(map->offset + offset) >> PAGE_SHIFT,
631					vma->vm_end - vma->vm_start,
632					vma->vm_page_prot))
633			return -EAGAIN;
634#endif
635
636		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
637			  " offset = 0x%llx\n",
638			  map->type,
639			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
640
641		vma->vm_ops = &drm_vm_ops;
642		break;
643	case _DRM_CONSISTENT:
644		/* Consistent memory is really like shared memory. But
645		 * it's allocated in a different way, so avoid fault */
646		if (remap_pfn_range(vma, vma->vm_start,
647		    page_to_pfn(virt_to_page(map->handle)),
648		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
649			return -EAGAIN;
650		vma->vm_page_prot = drm_dma_prot(map->type, vma);
651	/* fall through to _DRM_SHM */
652	case _DRM_SHM:
653		vma->vm_ops = &drm_vm_shm_ops;
654		vma->vm_private_data = (void *)map;
655		/* Don't let this area swap.  Change when
656		   DRM_KERNEL advisory is supported. */
657		vma->vm_flags |= VM_RESERVED;
658		break;
659	case _DRM_SCATTER_GATHER:
660		vma->vm_ops = &drm_vm_sg_ops;
661		vma->vm_private_data = (void *)map;
662		vma->vm_flags |= VM_RESERVED;
663		vma->vm_page_prot = drm_dma_prot(map->type, vma);
664		break;
665	default:
666		return -EINVAL;	/* This should never happen. */
667	}
668	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
669	vma->vm_flags |= VM_DONTEXPAND;
670
671	drm_vm_open_locked(dev, vma);
672	return 0;
673}
674
675int drm_mmap(struct file *filp, struct vm_area_struct *vma)
676{
677	struct drm_file *priv = filp->private_data;
678	struct drm_device *dev = priv->minor->dev;
679	int ret;
680
681	if (drm_device_is_unplugged(dev))
682		return -ENODEV;
683
684	mutex_lock(&dev->struct_mutex);
685	ret = drm_mmap_locked(filp, vma);
686	mutex_unlock(&dev->struct_mutex);
687
688	return ret;
689}
690EXPORT_SYMBOL(drm_mmap);
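For contrast, drivers that have moved off the legacy map table still rely on the same basic mechanism visible in both versions above: install a vm_operations_struct at mmap time, fix up vm_page_prot and vm_flags, and resolve pages in .fault. Below is a minimal, hypothetical kernel-side sketch of that pattern; my_obj, my_mmap and the pfn array are invented for illustration and do not correspond to any in-tree driver.

/* Hedged sketch of the modern equivalent: fault-driven mapping of driver pages. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pgtable.h>

struct my_obj {			/* hypothetical per-mapping object */
	unsigned long *pfns;	/* page frame numbers backing the mapping */
	unsigned long npages;
};

static vm_fault_t my_vm_fault(struct vm_fault *vmf)
{
	struct my_obj *obj = vmf->vma->vm_private_data;
	unsigned long pgoff = (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;

	if (pgoff >= obj->npages)
		return VM_FAULT_SIGBUS;

	/* VM_PFNMAP mapping: insert the backing pfn for the faulting address. */
	return vmf_insert_pfn(vmf->vma, vmf->address, obj->pfns[pgoff]);
}

static const struct vm_operations_struct my_vm_ops = {
	.fault = my_vm_fault,
};

static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct my_obj *obj = filp->private_data;

	if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > obj->npages)
		return -EINVAL;

	vma->vm_ops = &my_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}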