v3.1
  1/**
  2 * \file drm_vm.c
  3 * Memory mapping for DRM
  4 *
  5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
  6 * \author Gareth Hughes <gareth@valinux.com>
  7 */
  8
  9/*
 10 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 11 *
 12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 14 * All Rights Reserved.
 15 *
 16 * Permission is hereby granted, free of charge, to any person obtaining a
 17 * copy of this software and associated documentation files (the "Software"),
 18 * to deal in the Software without restriction, including without limitation
 19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 20 * and/or sell copies of the Software, and to permit persons to whom the
 21 * Software is furnished to do so, subject to the following conditions:
 22 *
 23 * The above copyright notice and this permission notice (including the next
 24 * paragraph) shall be included in all copies or substantial portions of the
 25 * Software.
 26 *
 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 33 * OTHER DEALINGS IN THE SOFTWARE.
 34 */
 35
 36#include "drmP.h"
 37#if defined(__ia64__)
 38#include <linux/efi.h>
 39#include <linux/slab.h>
 40#endif
 41
 42static void drm_vm_open(struct vm_area_struct *vma);
 43static void drm_vm_close(struct vm_area_struct *vma);
 44
 45static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
 46{
 47	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 48
 49#if defined(__i386__) || defined(__x86_64__)
 50	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
 51		pgprot_val(tmp) |= _PAGE_PCD;
 52		pgprot_val(tmp) &= ~_PAGE_PWT;
 53	}
 54#elif defined(__powerpc__)
 55	pgprot_val(tmp) |= _PAGE_NO_CACHE;
 56	if (map_type == _DRM_REGISTERS)
 57		pgprot_val(tmp) |= _PAGE_GUARDED;
 58#elif defined(__ia64__)
 59	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 60				    vma->vm_start))
 61		tmp = pgprot_writecombine(tmp);
 62	else
 63		tmp = pgprot_noncached(tmp);
 64#elif defined(__sparc__) || defined(__arm__)
 65	tmp = pgprot_noncached(tmp);
 66#endif
 67	return tmp;
 68}
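/*
 * Editor's note: on x86, setting _PAGE_PCD with _PAGE_PWT cleared yields an
 * uncached (UC-) mapping -- effectively what pgprot_noncached() does on this
 * architecture. AGP maps are skipped so their caching attributes are left to
 * the AGP layer.
 */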
 69
 70static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
 71{
 72	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 73
 74#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
 75	tmp |= _PAGE_NO_CACHE;
 76#endif
 77	return tmp;
 78}
 79
 80/**
 81 * \c fault method for AGP virtual memory.
 82 *
 83 * \param vma virtual memory area.
 84 * \param address access address.
 85 * \return pointer to the page structure.
 86 *
 87 * Find the right map and if it's AGP memory find the real physical page to
 88 * map, get the page, increment the use count and return it.
 89 */
 90#if __OS_HAS_AGP
 91static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 92{
 93	struct drm_file *priv = vma->vm_file->private_data;
 94	struct drm_device *dev = priv->minor->dev;
 95	struct drm_local_map *map = NULL;
 96	struct drm_map_list *r_list;
 97	struct drm_hash_item *hash;
 98
 99	/*
100	 * Find the right map
101	 */
102	if (!drm_core_has_AGP(dev))
103		goto vm_fault_error;
104
105	if (!dev->agp || !dev->agp->cant_use_aperture)
106		goto vm_fault_error;
107
108	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
109		goto vm_fault_error;
110
111	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
112	map = r_list->map;
113
114	if (map && map->type == _DRM_AGP) {
115		/*
116		 * Using vm_pgoff as a selector forces us to use this unusual
117		 * addressing scheme.
118		 */
119		resource_size_t offset = (unsigned long)vmf->virtual_address -
120			vma->vm_start;
121		resource_size_t baddr = map->offset + offset;
122		struct drm_agp_mem *agpmem;
123		struct page *page;
124
125#ifdef __alpha__
126		/*
127		 * Adjust to a bus-relative address
128		 */
129		baddr -= dev->hose->mem_space->start;
130#endif
131
132		/*
133		 * It's AGP memory - find the real physical page to map
134		 */
135		list_for_each_entry(agpmem, &dev->agp->memory, head) {
136			if (agpmem->bound <= baddr &&
137			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
138				break;
139		}
140
141		if (&agpmem->head == &dev->agp->memory)
142			goto vm_fault_error;
143
144		/*
145		 * Get the page, inc the use count, and return it
146		 */
147		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
148		page = agpmem->memory->pages[offset];
149		get_page(page);
150		vmf->page = page;
151
152		DRM_DEBUG
153		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
154		     (unsigned long long)baddr,
155		     agpmem->memory->pages[offset],
156		     (unsigned long long)offset,
157		     page_count(page));
158		return 0;
159	}
160vm_fault_error:
161	return VM_FAULT_SIGBUS;	/* Disallow mremap */
162}
163#else				/* __OS_HAS_AGP */
164static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
165{
166	return VM_FAULT_SIGBUS;
167}
168#endif				/* __OS_HAS_AGP */
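/*
 * Editor's note: this fault-time lookup exists for platforms where the CPU
 * cannot simply access the AGP aperture (agp->cant_use_aperture, e.g. some
 * PowerPC and Alpha systems). Instead of remapping the aperture up front,
 * each faulting address is translated back to the real page bound behind
 * the GART and inserted into the user mapping one page at a time.
 */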
169
170/**
171 * \c fault method for shared virtual memory.
172 *
173 * \param vma virtual memory area.
174 * \param address access address.
175 * \return pointer to the page structure.
176 *
177 * Get the mapping, find the real physical page to map, get the page, and
178 * return it.
179 */
180static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
181{
182	struct drm_local_map *map = vma->vm_private_data;
183	unsigned long offset;
184	unsigned long i;
185	struct page *page;
186
187	if (!map)
188		return VM_FAULT_SIGBUS;	/* Nothing allocated */
189
190	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
191	i = (unsigned long)map->handle + offset;
192	page = vmalloc_to_page((void *)i);
193	if (!page)
194		return VM_FAULT_SIGBUS;
195	get_page(page);
196	vmf->page = page;
197
198	DRM_DEBUG("shm_fault 0x%lx\n", offset);
199	return 0;
200}
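/*
 * Editor's note: _DRM_SHM maps are backed by vmalloc(), so the pages are
 * not physically contiguous; vmalloc_to_page() walks the kernel page
 * tables to resolve each backing page individually at fault time.
 */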
201
202/**
203 * \c close method for shared virtual memory.
204 *
205 * \param vma virtual memory area.
206 *
207 * Deletes map information if we are the last
208 * person to close a mapping and it's not in the global maplist.
209 */
210static void drm_vm_shm_close(struct vm_area_struct *vma)
211{
212	struct drm_file *priv = vma->vm_file->private_data;
213	struct drm_device *dev = priv->minor->dev;
214	struct drm_vma_entry *pt, *temp;
215	struct drm_local_map *map;
216	struct drm_map_list *r_list;
217	int found_maps = 0;
218
219	DRM_DEBUG("0x%08lx,0x%08lx\n",
220		  vma->vm_start, vma->vm_end - vma->vm_start);
221	atomic_dec(&dev->vma_count);
222
223	map = vma->vm_private_data;
224
225	mutex_lock(&dev->struct_mutex);
226	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
227		if (pt->vma->vm_private_data == map)
228			found_maps++;
229		if (pt->vma == vma) {
230			list_del(&pt->head);
231			kfree(pt);
232		}
233	}
234
235	/* We were the only map that was found */
236	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
237		/* Check to see if we are in the maplist; if we are not, then
238		 * we delete this mapping's information.
239		 */
240		found_maps = 0;
241		list_for_each_entry(r_list, &dev->maplist, head) {
242			if (r_list->map == map)
243				found_maps++;
244		}
245
246		if (!found_maps) {
247			drm_dma_handle_t dmah;
248
249			switch (map->type) {
250			case _DRM_REGISTERS:
251			case _DRM_FRAME_BUFFER:
252				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
253					int retcode;
254					retcode = mtrr_del(map->mtrr,
255							   map->offset,
256							   map->size);
257					DRM_DEBUG("mtrr_del = %d\n", retcode);
258				}
259				iounmap(map->handle);
260				break;
261			case _DRM_SHM:
262				vfree(map->handle);
263				break;
264			case _DRM_AGP:
265			case _DRM_SCATTER_GATHER:
266				break;
267			case _DRM_CONSISTENT:
268				dmah.vaddr = map->handle;
269				dmah.busaddr = map->offset;
270				dmah.size = map->size;
271				__drm_pci_free(dev, &dmah);
272				break;
273			case _DRM_GEM:
274				DRM_ERROR("tried to rmmap GEM object\n");
275				break;
276			}
277			kfree(map);
278		}
279	}
280	mutex_unlock(&dev->struct_mutex);
281}
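/*
 * Editor's note: the vmalist scan above doubles as a reference count. Only
 * when the last mapping of a _DRM_REMOVABLE map disappears, and the map is
 * already gone from dev->maplist, is the backing store (ioremap, vmalloc
 * or PCI-consistent memory) actually released.
 */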
282
283/**
284 * \c fault method for DMA virtual memory.
285 *
286 * \param vma virtual memory area.
287 * \param address access address.
288 * \return pointer to the page structure.
289 *
290 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
291 */
292static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
293{
294	struct drm_file *priv = vma->vm_file->private_data;
295	struct drm_device *dev = priv->minor->dev;
296	struct drm_device_dma *dma = dev->dma;
297	unsigned long offset;
298	unsigned long page_nr;
299	struct page *page;
300
301	if (!dma)
302		return VM_FAULT_SIGBUS;	/* Error */
303	if (!dma->pagelist)
304		return VM_FAULT_SIGBUS;	/* Nothing allocated */
305
306	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
307	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
308	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
309
310	get_page(page);
311	vmf->page = page;
312
313	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
314	return 0;
315}
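/*
 * Editor's note: dma->pagelist[] holds the kernel virtual address of each
 * DMA buffer page. Adding (offset & ~PAGE_MASK) before virt_to_page() is
 * harmless, since the sub-page bits do not change which page is resolved;
 * the v3.15 version below drops the addend entirely.
 */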
316
317/**
318 * \c fault method for scatter-gather virtual memory.
319 *
320 * \param vma virtual memory area.
321 * \param address access address.
322 * \return pointer to the page structure.
323 *
324 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
325 */
326static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
327{
328	struct drm_local_map *map = vma->vm_private_data;
329	struct drm_file *priv = vma->vm_file->private_data;
330	struct drm_device *dev = priv->minor->dev;
331	struct drm_sg_mem *entry = dev->sg;
332	unsigned long offset;
333	unsigned long map_offset;
334	unsigned long page_offset;
335	struct page *page;
336
337	if (!entry)
338		return VM_FAULT_SIGBUS;	/* Error */
339	if (!entry->pagelist)
340		return VM_FAULT_SIGBUS;	/* Nothing allocated */
341
342	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
343	map_offset = map->offset - (unsigned long)dev->sg->virtual;
344	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
345	page = entry->pagelist[page_offset];
346	get_page(page);
347	vmf->page = page;
348
349	return 0;
350}
351
352static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
353{
354	return drm_do_vm_fault(vma, vmf);
355}
356
357static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
358{
359	return drm_do_vm_shm_fault(vma, vmf);
360}
361
362static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
363{
364	return drm_do_vm_dma_fault(vma, vmf);
365}
366
367static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
368{
369	return drm_do_vm_sg_fault(vma, vmf);
370}
371
372/** AGP virtual memory operations */
373static const struct vm_operations_struct drm_vm_ops = {
374	.fault = drm_vm_fault,
375	.open = drm_vm_open,
376	.close = drm_vm_close,
377};
378
379/** Shared virtual memory operations */
380static const struct vm_operations_struct drm_vm_shm_ops = {
381	.fault = drm_vm_shm_fault,
382	.open = drm_vm_open,
383	.close = drm_vm_shm_close,
384};
385
386/** DMA virtual memory operations */
387static const struct vm_operations_struct drm_vm_dma_ops = {
388	.fault = drm_vm_dma_fault,
389	.open = drm_vm_open,
390	.close = drm_vm_close,
391};
392
393/** Scatter-gather virtual memory operations */
394static const struct vm_operations_struct drm_vm_sg_ops = {
395	.fault = drm_vm_sg_fault,
396	.open = drm_vm_open,
397	.close = drm_vm_close,
398};
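/*
 * Editor's sketch (not in the original file): these tables plug into the
 * standard vm_operations contract -- an mmap handler selects one, and the
 * core mm then calls .fault on first touch and .open/.close as the VMA is
 * duplicated or torn down. Handler and lookup names below are hypothetical:
 */
#if 0
/* hypothetical handler; look_up_map() is a placeholder, not a real API */
static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_local_map *map = look_up_map(filp, vma);

	vma->vm_ops = &drm_vm_shm_ops;	/* core mm calls .fault on first touch */
	vma->vm_private_data = map;	/* consumed by drm_do_vm_shm_fault() */
	vma->vm_file = filp;		/* needed by drm_vm_open()/close() */
	drm_vm_open_locked(vma);	/* track the VMA in dev->vmalist */
	return 0;
}
#endif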
399
400/**
401 * \c open method for shared virtual memory.
402 *
403 * \param vma virtual memory area.
404 *
405 * Create a new drm_vma_entry structure as the \p vma private data entry and
406 * add it to drm_device::vmalist.
407 */
408void drm_vm_open_locked(struct vm_area_struct *vma)
409{
410	struct drm_file *priv = vma->vm_file->private_data;
411	struct drm_device *dev = priv->minor->dev;
412	struct drm_vma_entry *vma_entry;
413
414	DRM_DEBUG("0x%08lx,0x%08lx\n",
415		  vma->vm_start, vma->vm_end - vma->vm_start);
416	atomic_inc(&dev->vma_count);
417
418	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
419	if (vma_entry) {
420		vma_entry->vma = vma;
421		vma_entry->pid = current->pid;
422		list_add(&vma_entry->head, &dev->vmalist);
423	}
424}
425
426static void drm_vm_open(struct vm_area_struct *vma)
427{
428	struct drm_file *priv = vma->vm_file->private_data;
429	struct drm_device *dev = priv->minor->dev;
430
431	mutex_lock(&dev->struct_mutex);
432	drm_vm_open_locked(vma);
433	mutex_unlock(&dev->struct_mutex);
434}
435
436void drm_vm_close_locked(struct vm_area_struct *vma)
437{
438	struct drm_file *priv = vma->vm_file->private_data;
439	struct drm_device *dev = priv->minor->dev;
440	struct drm_vma_entry *pt, *temp;
441
442	DRM_DEBUG("0x%08lx,0x%08lx\n",
443		  vma->vm_start, vma->vm_end - vma->vm_start);
444	atomic_dec(&dev->vma_count);
445
446	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
447		if (pt->vma == vma) {
448			list_del(&pt->head);
449			kfree(pt);
450			break;
451		}
452	}
453}
454
455/**
456 * \c close method for all virtual memory types.
457 *
458 * \param vma virtual memory area.
459 *
460 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
461 * free it.
462 */
463static void drm_vm_close(struct vm_area_struct *vma)
464{
465	struct drm_file *priv = vma->vm_file->private_data;
466	struct drm_device *dev = priv->minor->dev;
467
468	mutex_lock(&dev->struct_mutex);
469	drm_vm_close_locked(vma);
470	mutex_unlock(&dev->struct_mutex);
471}
472
473/**
474 * mmap DMA memory.
475 *
476 * \param file_priv DRM file private.
477 * \param vma virtual memory area.
478 * \return zero on success or a negative number on failure.
479 *
480 * Sets the virtual memory area operations structure to vm_dma_ops, the file
481 * pointer, and calls vm_open().
482 */
483static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
484{
485	struct drm_file *priv = filp->private_data;
486	struct drm_device *dev;
487	struct drm_device_dma *dma;
488	unsigned long length = vma->vm_end - vma->vm_start;
489
490	dev = priv->minor->dev;
491	dma = dev->dma;
492	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
493		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
494
495	/* Length must match exact page count */
496	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
497		return -EINVAL;
498	}
499
500	if (!capable(CAP_SYS_ADMIN) &&
501	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
502		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
503#if defined(__i386__) || defined(__x86_64__)
504		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
505#else
506		/* Ye gads this is ugly.  With more thought
507		   we could move this up higher and use
508		   `protection_map' instead.  */
509		vma->vm_page_prot =
510		    __pgprot(pte_val
511			     (pte_wrprotect
512			      (__pte(pgprot_val(vma->vm_page_prot)))));
513#endif
514	}
515
516	vma->vm_ops = &drm_vm_dma_ops;
517
518	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
519	vma->vm_flags |= VM_DONTEXPAND;
520
521	vma->vm_file = filp;	/* Needed for drm_vm_open() */
522	drm_vm_open_locked(vma);
523	return 0;
524}
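/*
 * Editor's sketch (not in the original file): from userspace, the DMA area
 * is reached by mapping page offset 0 on the DRM fd, which drm_mmap()
 * routes here. Device path and size are illustrative only:
 */
#if 0
	int fd = open("/dev/dri/card0", O_RDWR);
	void *bufs = mmap(NULL, dma_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);	/* pgoff 0 selects the DMA maps */
#endif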
525
526static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
527{
528#ifdef __alpha__
529	return dev->hose->dense_mem_base;
530#else
531	return 0;
532#endif
533}
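/*
 * Editor's note: on Alpha, PCI "dense" memory space starts at a nonzero
 * CPU address per hose (bus), so register and framebuffer offsets must be
 * rebased by dense_mem_base before remapping; on all other architectures
 * the map offset is used as-is.
 */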
534
535/**
536 * mmap support for all map types.
537 *
538 * \param file_priv DRM file private.
539 * \param vma virtual memory area.
540 * \return zero on success or a negative number on failure.
541 *
542 * If the virtual memory area has no offset associated with it then it's a DMA
543 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
544 * checks that the restricted flag is not set, sets the virtual memory operations
545 * according to the mapping type and remaps the pages. Finally sets the file
546 * pointer and calls vm_open().
547 */
548int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
549{
550	struct drm_file *priv = filp->private_data;
551	struct drm_device *dev = priv->minor->dev;
552	struct drm_local_map *map = NULL;
553	resource_size_t offset = 0;
554	struct drm_hash_item *hash;
555
556	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
557		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
558
559	if (!priv->authenticated)
560		return -EACCES;
561
562	/* We check for "dma". On Apple's UniNorth, it's valid to have
563	 * the AGP mapped at physical address 0
564	 * --BenH.
565	 */
566	if (!vma->vm_pgoff
567#if __OS_HAS_AGP
568	    && (!dev->agp
569		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
570#endif
571	    )
572		return drm_mmap_dma(filp, vma);
573
574	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
575		DRM_ERROR("Could not find map\n");
576		return -EINVAL;
577	}
578
579	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
580	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
581		return -EPERM;
582
583	/* Check for valid size. */
584	if (map->size < vma->vm_end - vma->vm_start)
585		return -EINVAL;
586
587	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
588		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
589#if defined(__i386__) || defined(__x86_64__)
590		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
591#else
592		/* Ye gads this is ugly.  With more thought
593		   we could move this up higher and use
594		   `protection_map' instead.  */
595		vma->vm_page_prot =
596		    __pgprot(pte_val
597			     (pte_wrprotect
598			      (__pte(pgprot_val(vma->vm_page_prot)))));
599#endif
600	}
601
602	switch (map->type) {
603#if !defined(__arm__)
604	case _DRM_AGP:
605		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
606			/*
607			 * On some platforms we can't talk to bus dma address from the CPU, so for
608			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
609			 * pages and mappings in fault()
610			 */
611#if defined(__powerpc__)
612			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
613#endif
614			vma->vm_ops = &drm_vm_ops;
615			break;
616		}
617		/* fall through to _DRM_FRAME_BUFFER... */
618#endif
619	case _DRM_FRAME_BUFFER:
620	case _DRM_REGISTERS:
621		offset = drm_core_get_reg_ofs(dev);
622		vma->vm_flags |= VM_IO;	/* not in core dump */
623		vma->vm_page_prot = drm_io_prot(map->type, vma);
624#if !defined(__arm__)
625		if (io_remap_pfn_range(vma, vma->vm_start,
626				       (map->offset + offset) >> PAGE_SHIFT,
627				       vma->vm_end - vma->vm_start,
628				       vma->vm_page_prot))
629			return -EAGAIN;
630#else
631		if (remap_pfn_range(vma, vma->vm_start,
632					(map->offset + offset) >> PAGE_SHIFT,
633					vma->vm_end - vma->vm_start,
634					vma->vm_page_prot))
635			return -EAGAIN;
636#endif
637
638		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
639			  " offset = 0x%llx\n",
640			  map->type,
641			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
642
643		vma->vm_ops = &drm_vm_ops;
644		break;
645	case _DRM_CONSISTENT:
646		/* Consistent memory is really like shared memory. But
647		 * it's allocated in a different way, so avoid fault */
648		if (remap_pfn_range(vma, vma->vm_start,
649		    page_to_pfn(virt_to_page(map->handle)),
650		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
651			return -EAGAIN;
652		vma->vm_page_prot = drm_dma_prot(map->type, vma);
653	/* fall through to _DRM_SHM */
654	case _DRM_SHM:
655		vma->vm_ops = &drm_vm_shm_ops;
656		vma->vm_private_data = (void *)map;
657		/* Don't let this area swap.  Change when
658		   DRM_KERNEL advisory is supported. */
659		vma->vm_flags |= VM_RESERVED;
660		break;
661	case _DRM_SCATTER_GATHER:
662		vma->vm_ops = &drm_vm_sg_ops;
663		vma->vm_private_data = (void *)map;
664		vma->vm_flags |= VM_RESERVED;
665		vma->vm_page_prot = drm_dma_prot(map->type, vma);
666		break;
667	default:
668		return -EINVAL;	/* This should never happen. */
669	}
670	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
671	vma->vm_flags |= VM_DONTEXPAND;
672
673	vma->vm_file = filp;	/* Needed for drm_vm_open() */
674	drm_vm_open_locked(vma);
675	return 0;
676}
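/*
 * Editor's sketch (not in the original file): a non-zero page offset selects
 * a map from dev->maplist. With libdrm, the offset comes back from
 * drmAddMap() as a handle that is then passed to mmap(). Register base,
 * size and flags are illustrative only:
 */
#if 0
	drm_handle_t handle;
	drmAddMap(fd, reg_base, reg_size, DRM_REGISTERS, DRM_READ_ONLY, &handle);
	void *regs = mmap(NULL, reg_size, PROT_READ, MAP_SHARED, fd, handle);
#endif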
677
678int drm_mmap(struct file *filp, struct vm_area_struct *vma)
679{
680	struct drm_file *priv = filp->private_data;
681	struct drm_device *dev = priv->minor->dev;
682	int ret;
683
684	mutex_lock(&dev->struct_mutex);
685	ret = drm_mmap_locked(filp, vma);
686	mutex_unlock(&dev->struct_mutex);
687
688	return ret;
689}
690EXPORT_SYMBOL(drm_mmap);
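/*
 * Editor's sketch (not in the original file): drm_mmap() is exported so a
 * legacy (non-GEM) driver can wire it straight into its file_operations;
 * the driver name here is hypothetical:
 */
#if 0
static const struct file_operations mydrv_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_mmap,
	.poll = drm_poll,
};
#endif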
v3.15
  1/**
  2 * \file drm_vm.c
  3 * Memory mapping for DRM
  4 *
  5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
  6 * \author Gareth Hughes <gareth@valinux.com>
  7 */
  8
  9/*
 10 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 11 *
 12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 14 * All Rights Reserved.
 15 *
 16 * Permission is hereby granted, free of charge, to any person obtaining a
 17 * copy of this software and associated documentation files (the "Software"),
 18 * to deal in the Software without restriction, including without limitation
 19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 20 * and/or sell copies of the Software, and to permit persons to whom the
 21 * Software is furnished to do so, subject to the following conditions:
 22 *
 23 * The above copyright notice and this permission notice (including the next
 24 * paragraph) shall be included in all copies or substantial portions of the
 25 * Software.
 26 *
 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 33 * OTHER DEALINGS IN THE SOFTWARE.
 34 */
 35
 36#include <drm/drmP.h>
 37#include <linux/export.h>
 38#if defined(__ia64__)
 39#include <linux/efi.h>
 40#include <linux/slab.h>
 41#endif
 42
 43static void drm_vm_open(struct vm_area_struct *vma);
 44static void drm_vm_close(struct vm_area_struct *vma);
 45
 46static pgprot_t drm_io_prot(struct drm_local_map *map,
 47			    struct vm_area_struct *vma)
 48{
 49	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 50
 51#if defined(__i386__) || defined(__x86_64__)
 52	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
 53		tmp = pgprot_noncached(tmp);
 54	else
 55		tmp = pgprot_writecombine(tmp);
 56#elif defined(__powerpc__)
 57	pgprot_val(tmp) |= _PAGE_NO_CACHE;
 58	if (map->type == _DRM_REGISTERS)
 59		pgprot_val(tmp) |= _PAGE_GUARDED;
 60#elif defined(__ia64__)
 61	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 62				    vma->vm_start))
 63		tmp = pgprot_writecombine(tmp);
 64	else
 65		tmp = pgprot_noncached(tmp);
 66#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
 67	tmp = pgprot_noncached(tmp);
 68#endif
 69	return tmp;
 70}
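/*
 * Editor's note: compared with v3.1, the x86 branch now uses the PAT-aware
 * helpers and takes the whole map instead of just its type: registers
 * default to pgprot_noncached() unless the map was created with
 * _DRM_WRITE_COMBINING, and everything else gets pgprot_writecombine().
 */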
 71
 72static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
 73{
 74	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 75
 76#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
 77	tmp |= _PAGE_NO_CACHE;
 78#endif
 79	return tmp;
 80}
 81
 82/**
 83 * \c fault method for AGP virtual memory.
 84 *
 85 * \param vma virtual memory area.
 86 * \param address access address.
 87 * \return pointer to the page structure.
 88 *
 89 * Find the right map and if it's AGP memory find the real physical page to
 90 * map, get the page, increment the use count and return it.
 91 */
 92#if __OS_HAS_AGP
 93static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 94{
 95	struct drm_file *priv = vma->vm_file->private_data;
 96	struct drm_device *dev = priv->minor->dev;
 97	struct drm_local_map *map = NULL;
 98	struct drm_map_list *r_list;
 99	struct drm_hash_item *hash;
100
101	/*
102	 * Find the right map
103	 */
104	if (!dev->agp)
105		goto vm_fault_error;
106
107	if (!dev->agp || !dev->agp->cant_use_aperture)
108		goto vm_fault_error;
109
110	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
111		goto vm_fault_error;
112
113	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
114	map = r_list->map;
115
116	if (map && map->type == _DRM_AGP) {
117		/*
118		 * Using vm_pgoff as a selector forces us to use this unusual
119		 * addressing scheme.
120		 */
121		resource_size_t offset = (unsigned long)vmf->virtual_address -
122			vma->vm_start;
123		resource_size_t baddr = map->offset + offset;
124		struct drm_agp_mem *agpmem;
125		struct page *page;
126
127#ifdef __alpha__
128		/*
129		 * Adjust to a bus-relative address
130		 */
131		baddr -= dev->hose->mem_space->start;
132#endif
133
134		/*
135		 * It's AGP memory - find the real physical page to map
136		 */
137		list_for_each_entry(agpmem, &dev->agp->memory, head) {
138			if (agpmem->bound <= baddr &&
139			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
140				break;
141		}
142
143		if (&agpmem->head == &dev->agp->memory)
144			goto vm_fault_error;
145
146		/*
147		 * Get the page, inc the use count, and return it
148		 */
149		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
150		page = agpmem->memory->pages[offset];
151		get_page(page);
152		vmf->page = page;
153
154		DRM_DEBUG
155		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
156		     (unsigned long long)baddr,
157		     agpmem->memory->pages[offset],
158		     (unsigned long long)offset,
159		     page_count(page));
160		return 0;
161	}
162vm_fault_error:
163	return VM_FAULT_SIGBUS;	/* Disallow mremap */
164}
165#else				/* __OS_HAS_AGP */
166static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
167{
168	return VM_FAULT_SIGBUS;
169}
170#endif				/* __OS_HAS_AGP */
171
172/**
173 * \c fault method for shared virtual memory.
174 *
175 * \param vma virtual memory area.
176 * \param address access address.
177 * \return pointer to the page structure.
178 *
179 * Get the mapping, find the real physical page to map, get the page, and
180 * return it.
181 */
182static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
183{
184	struct drm_local_map *map = vma->vm_private_data;
185	unsigned long offset;
186	unsigned long i;
187	struct page *page;
188
189	if (!map)
190		return VM_FAULT_SIGBUS;	/* Nothing allocated */
191
192	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
193	i = (unsigned long)map->handle + offset;
194	page = vmalloc_to_page((void *)i);
195	if (!page)
196		return VM_FAULT_SIGBUS;
197	get_page(page);
198	vmf->page = page;
199
200	DRM_DEBUG("shm_fault 0x%lx\n", offset);
201	return 0;
202}
203
204/**
205 * \c close method for shared virtual memory.
206 *
207 * \param vma virtual memory area.
208 *
209 * Deletes map information if we are the last
210 * person to close a mapping and it's not in the global maplist.
211 */
212static void drm_vm_shm_close(struct vm_area_struct *vma)
213{
214	struct drm_file *priv = vma->vm_file->private_data;
215	struct drm_device *dev = priv->minor->dev;
216	struct drm_vma_entry *pt, *temp;
217	struct drm_local_map *map;
218	struct drm_map_list *r_list;
219	int found_maps = 0;
220
221	DRM_DEBUG("0x%08lx,0x%08lx\n",
222		  vma->vm_start, vma->vm_end - vma->vm_start);
223
224	map = vma->vm_private_data;
225
226	mutex_lock(&dev->struct_mutex);
227	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
228		if (pt->vma->vm_private_data == map)
229			found_maps++;
230		if (pt->vma == vma) {
231			list_del(&pt->head);
232			kfree(pt);
233		}
234	}
235
236	/* We were the only map that was found */
237	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
238		/* Check to see if we are in the maplist; if we are not, then
239		 * we delete this mapping's information.
240		 */
241		found_maps = 0;
242		list_for_each_entry(r_list, &dev->maplist, head) {
243			if (r_list->map == map)
244				found_maps++;
245		}
246
247		if (!found_maps) {
248			drm_dma_handle_t dmah;
249
250			switch (map->type) {
251			case _DRM_REGISTERS:
252			case _DRM_FRAME_BUFFER:
253				arch_phys_wc_del(map->mtrr);
254				iounmap(map->handle);
255				break;
256			case _DRM_SHM:
257				vfree(map->handle);
258				break;
259			case _DRM_AGP:
260			case _DRM_SCATTER_GATHER:
261				break;
262			case _DRM_CONSISTENT:
263				dmah.vaddr = map->handle;
264				dmah.busaddr = map->offset;
265				dmah.size = map->size;
266				__drm_pci_free(dev, &dmah);
267				break;
268			}
269			kfree(map);
270		}
271	}
272	mutex_unlock(&dev->struct_mutex);
273}
274
275/**
276 * \c fault method for DMA virtual memory.
277 *
278 * \param vma virtual memory area.
279 * \param address access address.
280 * \return pointer to the page structure.
281 *
282 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
283 */
284static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
285{
286	struct drm_file *priv = vma->vm_file->private_data;
287	struct drm_device *dev = priv->minor->dev;
288	struct drm_device_dma *dma = dev->dma;
289	unsigned long offset;
290	unsigned long page_nr;
291	struct page *page;
292
293	if (!dma)
294		return VM_FAULT_SIGBUS;	/* Error */
295	if (!dma->pagelist)
296		return VM_FAULT_SIGBUS;	/* Nothing allocated */
297
298	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
299	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
300	page = virt_to_page((void *)dma->pagelist[page_nr]);
301
302	get_page(page);
303	vmf->page = page;
304
305	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
306	return 0;
307}
308
309/**
310 * \c fault method for scatter-gather virtual memory.
311 *
312 * \param vma virtual memory area.
313 * \param address access address.
314 * \return pointer to the page structure.
315 *
316 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
317 */
318static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
319{
320	struct drm_local_map *map = vma->vm_private_data;
321	struct drm_file *priv = vma->vm_file->private_data;
322	struct drm_device *dev = priv->minor->dev;
323	struct drm_sg_mem *entry = dev->sg;
324	unsigned long offset;
325	unsigned long map_offset;
326	unsigned long page_offset;
327	struct page *page;
328
329	if (!entry)
330		return VM_FAULT_SIGBUS;	/* Error */
331	if (!entry->pagelist)
332		return VM_FAULT_SIGBUS;	/* Nothing allocated */
333
334	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
335	map_offset = map->offset - (unsigned long)dev->sg->virtual;
336	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
337	page = entry->pagelist[page_offset];
338	get_page(page);
339	vmf->page = page;
340
341	return 0;
342}
343
344static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
345{
346	return drm_do_vm_fault(vma, vmf);
347}
348
349static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
350{
351	return drm_do_vm_shm_fault(vma, vmf);
352}
353
354static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
355{
356	return drm_do_vm_dma_fault(vma, vmf);
357}
358
359static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
360{
361	return drm_do_vm_sg_fault(vma, vmf);
362}
363
364/** AGP virtual memory operations */
365static const struct vm_operations_struct drm_vm_ops = {
366	.fault = drm_vm_fault,
367	.open = drm_vm_open,
368	.close = drm_vm_close,
369};
370
371/** Shared virtual memory operations */
372static const struct vm_operations_struct drm_vm_shm_ops = {
373	.fault = drm_vm_shm_fault,
374	.open = drm_vm_open,
375	.close = drm_vm_shm_close,
376};
377
378/** DMA virtual memory operations */
379static const struct vm_operations_struct drm_vm_dma_ops = {
380	.fault = drm_vm_dma_fault,
381	.open = drm_vm_open,
382	.close = drm_vm_close,
383};
384
385/** Scatter-gather virtual memory operations */
386static const struct vm_operations_struct drm_vm_sg_ops = {
387	.fault = drm_vm_sg_fault,
388	.open = drm_vm_open,
389	.close = drm_vm_close,
390};
391
392/**
393 * \c open method for shared virtual memory.
394 *
395 * \param vma virtual memory area.
396 *
397 * Create a new drm_vma_entry structure as the \p vma private data entry and
398 * add it to drm_device::vmalist.
399 */
400void drm_vm_open_locked(struct drm_device *dev,
401		struct vm_area_struct *vma)
402{
403	struct drm_vma_entry *vma_entry;
404
405	DRM_DEBUG("0x%08lx,0x%08lx\n",
406		  vma->vm_start, vma->vm_end - vma->vm_start);
407
408	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
409	if (vma_entry) {
410		vma_entry->vma = vma;
411		vma_entry->pid = current->pid;
412		list_add(&vma_entry->head, &dev->vmalist);
413	}
414}
415EXPORT_SYMBOL_GPL(drm_vm_open_locked);
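/*
 * Editor's note: drm_vm_open_locked() now takes the device explicitly and
 * is exported (GPL-only), since callers outside this file -- the GEM mmap
 * path, for instance -- also need to register a VMA while already holding
 * dev->struct_mutex.
 */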
416
417static void drm_vm_open(struct vm_area_struct *vma)
418{
419	struct drm_file *priv = vma->vm_file->private_data;
420	struct drm_device *dev = priv->minor->dev;
421
422	mutex_lock(&dev->struct_mutex);
423	drm_vm_open_locked(dev, vma);
424	mutex_unlock(&dev->struct_mutex);
425}
426
427void drm_vm_close_locked(struct drm_device *dev,
428		struct vm_area_struct *vma)
429{
430	struct drm_vma_entry *pt, *temp;
431
432	DRM_DEBUG("0x%08lx,0x%08lx\n",
433		  vma->vm_start, vma->vm_end - vma->vm_start);
434
435	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
436		if (pt->vma == vma) {
437			list_del(&pt->head);
438			kfree(pt);
439			break;
440		}
441	}
442}
443
444/**
445 * \c close method for all virtual memory types.
446 *
447 * \param vma virtual memory area.
448 *
449 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
450 * free it.
451 */
452static void drm_vm_close(struct vm_area_struct *vma)
453{
454	struct drm_file *priv = vma->vm_file->private_data;
455	struct drm_device *dev = priv->minor->dev;
456
457	mutex_lock(&dev->struct_mutex);
458	drm_vm_close_locked(dev, vma);
459	mutex_unlock(&dev->struct_mutex);
460}
461
462/**
463 * mmap DMA memory.
464 *
465 * \param file_priv DRM file private.
466 * \param vma virtual memory area.
467 * \return zero on success or a negative number on failure.
468 *
469 * Sets the virtual memory area operations structure to vm_dma_ops and
470 * calls vm_open().
471 */
472static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
473{
474	struct drm_file *priv = filp->private_data;
475	struct drm_device *dev;
476	struct drm_device_dma *dma;
477	unsigned long length = vma->vm_end - vma->vm_start;
478
479	dev = priv->minor->dev;
480	dma = dev->dma;
481	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
482		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
483
484	/* Length must match exact page count */
485	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
486		return -EINVAL;
487	}
488
489	if (!capable(CAP_SYS_ADMIN) &&
490	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
491		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
492#if defined(__i386__) || defined(__x86_64__)
493		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
494#else
495		/* Ye gads this is ugly.  With more thought
496		   we could move this up higher and use
497		   `protection_map' instead.  */
498		vma->vm_page_prot =
499		    __pgprot(pte_val
500			     (pte_wrprotect
501			      (__pte(pgprot_val(vma->vm_page_prot)))));
502#endif
503	}
504
505	vma->vm_ops = &drm_vm_dma_ops;
506
507	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
508
509	drm_vm_open_locked(dev, vma);
510	return 0;
511}
512
513static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
514{
515#ifdef __alpha__
516	return dev->hose->dense_mem_base;
517#else
518	return 0;
519#endif
520}
521
522/**
523 * mmap DMA memory.
524 *
525 * \param file_priv DRM file private.
526 * \param vma virtual memory area.
527 * \return zero on success or a negative number on failure.
528 *
529 * If the virtual memory area has no offset associated with it then it's a DMA
530 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
531 * checks that the restricted flag is not set, sets the virtual memory operations
532 * according to the mapping type and remaps the pages. Finally calls
533 * vm_open().
534 */
535int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
536{
537	struct drm_file *priv = filp->private_data;
538	struct drm_device *dev = priv->minor->dev;
539	struct drm_local_map *map = NULL;
540	resource_size_t offset = 0;
541	struct drm_hash_item *hash;
542
543	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
544		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
545
546	if (!priv->authenticated)
547		return -EACCES;
548
549	/* We check for "dma". On Apple's UniNorth, it's valid to have
550	 * the AGP mapped at physical address 0
551	 * --BenH.
552	 */
553	if (!vma->vm_pgoff
554#if __OS_HAS_AGP
555	    && (!dev->agp
556		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
557#endif
558	    )
559		return drm_mmap_dma(filp, vma);
560
561	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
562		DRM_ERROR("Could not find map\n");
563		return -EINVAL;
564	}
565
566	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
567	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
568		return -EPERM;
569
570	/* Check for valid size. */
571	if (map->size < vma->vm_end - vma->vm_start)
572		return -EINVAL;
573
574	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
575		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
576#if defined(__i386__) || defined(__x86_64__)
577		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
578#else
579		/* Ye gads this is ugly.  With more thought
580		   we could move this up higher and use
581		   `protection_map' instead.  */
582		vma->vm_page_prot =
583		    __pgprot(pte_val
584			     (pte_wrprotect
585			      (__pte(pgprot_val(vma->vm_page_prot)))));
586#endif
587	}
588
589	switch (map->type) {
590#if !defined(__arm__)
591	case _DRM_AGP:
592		if (dev->agp && dev->agp->cant_use_aperture) {
593			/*
594			 * On some platforms we can't talk to bus dma address from the CPU, so for
595			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
596			 * pages and mappings in fault()
597			 */
598#if defined(__powerpc__)
599			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
600#endif
601			vma->vm_ops = &drm_vm_ops;
602			break;
603		}
604		/* fall through to _DRM_FRAME_BUFFER... */
605#endif
606	case _DRM_FRAME_BUFFER:
607	case _DRM_REGISTERS:
608		offset = drm_core_get_reg_ofs(dev);
609		vma->vm_page_prot = drm_io_prot(map, vma);
610		if (io_remap_pfn_range(vma, vma->vm_start,
611				       (map->offset + offset) >> PAGE_SHIFT,
612				       vma->vm_end - vma->vm_start,
613				       vma->vm_page_prot))
614			return -EAGAIN;
615		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
616			  " offset = 0x%llx\n",
617			  map->type,
618			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
619
620		vma->vm_ops = &drm_vm_ops;
621		break;
622	case _DRM_CONSISTENT:
623		/* Consistent memory is really like shared memory. But
624		 * it's allocated in a different way, so avoid fault */
625		if (remap_pfn_range(vma, vma->vm_start,
626		    page_to_pfn(virt_to_page(map->handle)),
627		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
628			return -EAGAIN;
629		vma->vm_page_prot = drm_dma_prot(map->type, vma);
630	/* fall through to _DRM_SHM */
631	case _DRM_SHM:
632		vma->vm_ops = &drm_vm_shm_ops;
633		vma->vm_private_data = (void *)map;
634		break;
635	case _DRM_SCATTER_GATHER:
636		vma->vm_ops = &drm_vm_sg_ops;
637		vma->vm_private_data = (void *)map;
638		vma->vm_page_prot = drm_dma_prot(map->type, vma);
639		break;
640	default:
641		return -EINVAL;	/* This should never happen. */
642	}
643	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
644
645	drm_vm_open_locked(dev, vma);
646	return 0;
647}
648
649int drm_mmap(struct file *filp, struct vm_area_struct *vma)
650{
651	struct drm_file *priv = filp->private_data;
652	struct drm_device *dev = priv->minor->dev;
653	int ret;
654
655	if (drm_device_is_unplugged(dev))
656		return -ENODEV;
657
658	mutex_lock(&dev->struct_mutex);
659	ret = drm_mmap_locked(filp, vma);
660	mutex_unlock(&dev->struct_mutex);
661
662	return ret;
663}
664EXPORT_SYMBOL(drm_mmap);
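/*
 * Editor's note: the drm_device_is_unplugged() guard is new relative to
 * v3.1 -- once the underlying device has been unplugged, further mmap
 * attempts fail with -ENODEV instead of touching stale hardware state.
 */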