/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
#include "drm_legacy.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
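
/*
 * Worked example (not in the original source): with 4 KiB pages
 * (PAGE_SHIFT == 12) and SHMLBA == 16 KiB, 'bits' above is
 * ilog2(16384 >> 12) + 1 == 3, so the low three bits of the original
 * kernel address's page offset are folded into 'add' and the hash
 * search steps by 1 << 3 keys.  The resulting user token therefore has
 * the same SHMLBA-relevant low bits as the kernel virtual address,
 * which keeps the eventual mmap() mapping cache-colour compatible.
 */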

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size-1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
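
/*
 * Hedged usage sketch (not part of this file): a legacy UMS driver would
 * typically call drm_legacy_addmap() at load time to expose a PCI BAR to
 * userspace.  The function name and BAR index below are illustrative.
 */
#if 0	/* example only */
static int example_map_registers(struct drm_device *dev)
{
	struct drm_local_map *regs;

	/* Map MMIO BAR 0; userspace later mmap()s it via the map's
	 * 32-bit user token assigned in drm_addmap_core(). */
	return drm_legacy_addmap(dev, pci_resource_start(dev->pdev, 0),
				 pci_resource_len(dev->pdev, 0),
				 _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
}
#endif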
 
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit: this cast isn't very nice, but the
	 * API was fixed long ago, so it's too late to change it now */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 *  it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}
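
/*
 * Hedged userspace sketch (not part of this file): libdrm's drmAddMap()
 * wraps this ioctl.  The kernel writes the 32-bit user token back into
 * map.handle, and userspace passes that token to mmap() as the offset.
 * This is a fragment; fd, sarea_size and sarea are assumed to be
 * declared by the caller, and the values are illustrative.
 */
#if 0	/* example only, userspace code */
	struct drm_map map = {
		.size  = sarea_size,
		.type  = _DRM_SHM,
		.flags = _DRM_CONTAINS_LOCK,
	};
	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
		sarea = mmap(NULL, map.size, PROT_READ | PROT_WRITE,
			     MAP_SHARED, fd,
			     (off_t)(unsigned long)map.handle);
#endif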

/**
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;   /* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty
	 * and we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */

int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
			       sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
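
/*
 * Worked example (not in the original source): a request->size of 65536
 * gives order = order_base_2(65536) = 16, so size = 1 << 16 = 64 KiB.
 * With 4 KiB pages (PAGE_SHIFT == 12), page_order = 16 - 12 = 4, and
 * each drm_pci_alloc() segment above is PAGE_SIZE << 4 = 64 KiB, carved
 * into buffers of 'alignment' bytes until 'count' buffers exist.
 */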

static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}
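
/*
 * Hedged userspace sketch (not part of this file): DRM_IOCTL_ADD_BUFS
 * takes a struct drm_buf_desc; the flags select the backing store that
 * the dispatcher above routes on.  This is a fragment; fd and ret are
 * assumed to be declared by the caller, and the values are illustrative.
 */
#if 0	/* example only, userspace code */
	struct drm_buf_desc desc = {
		.count = 32,
		.size  = 65536,
		.flags = _DRM_AGP_BUFFER,	/* or _DRM_SG_BUFFER; 0 = PCI */
		.agp_start = 0,
	};
	ret = ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
	/* on success, desc.count and desc.size report what was added */
#endif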

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &from->low_mark,
						 sizeof(from->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &from->high_mark,
						 sizeof(from->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (request->count >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
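
/*
 * Hedged userspace sketch (not part of this file): libdrm's drmMapBufs()
 * wraps this ioctl.  After the single vm_mmap() above, each buffer's
 * client address is the mapping base plus its byte offset, which is what
 * the copy_to_user() of 'address' hands back per buffer.  This is a
 * fragment; fd, expected_count, list_array and buf0 are assumed to be
 * declared by the caller.
 */
#if 0	/* example only, userspace code */
	struct drm_buf_map bm = {
		.count = expected_count,
		.list  = list_array,	/* user-allocated drm_buf_pub array */
	};
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0)
		buf0 = bm.list[0].address;	/* base + buffer 0's offset */
#endif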

int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);
v3.5.6
   1/**
   2 * \file drm_bufs.c
   3 * Generic buffer template
   4 *
   5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
   6 * \author Gareth Hughes <gareth@valinux.com>
   7 */
   8
   9/*
  10 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
  11 *
  12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  14 * All Rights Reserved.
  15 *
 
 
 
  16 * Permission is hereby granted, free of charge, to any person obtaining a
  17 * copy of this software and associated documentation files (the "Software"),
  18 * to deal in the Software without restriction, including without limitation
  19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  20 * and/or sell copies of the Software, and to permit persons to whom the
  21 * Software is furnished to do so, subject to the following conditions:
  22 *
  23 * The above copyright notice and this permission notice (including the next
  24 * paragraph) shall be included in all copies or substantial portions of the
  25 * Software.
  26 *
  27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  33 * OTHER DEALINGS IN THE SOFTWARE.
  34 */
  35
  36#include <linux/vmalloc.h>
  37#include <linux/slab.h>
  38#include <linux/log2.h>
  39#include <linux/export.h>
  40#include <asm/shmparam.h>
  41#include "drmP.h"
 
  42
  43static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
  44						  struct drm_local_map *map)
  45{
  46	struct drm_map_list *entry;
  47	list_for_each_entry(entry, &dev->maplist, head) {
  48		/*
  49		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
  50		 * while PCI resources may live above that, we only compare the
  51		 * lower 32 bits of the map offset for maps of type
  52		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
  53		 * It is assumed that if a driver have more than one resource
  54		 * of each type, the lower 32 bits are different.
  55		 */
  56		if (!entry->map ||
  57		    map->type != entry->map->type ||
  58		    entry->master != dev->primary->master)
  59			continue;
  60		switch (map->type) {
  61		case _DRM_SHM:
  62			if (map->flags != _DRM_CONTAINS_LOCK)
  63				break;
  64			return entry;
  65		case _DRM_REGISTERS:
  66		case _DRM_FRAME_BUFFER:
  67			if ((entry->map->offset & 0xffffffff) ==
  68			    (map->offset & 0xffffffff))
  69				return entry;
  70		default: /* Make gcc happy */
  71			;
  72		}
  73		if (entry->map->offset == map->offset)
  74			return entry;
  75	}
  76
  77	return NULL;
  78}
  79
  80static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
  81			  unsigned long user_token, int hashed_handle, int shm)
  82{
  83	int use_hashed_handle, shift;
  84	unsigned long add;
  85
  86#if (BITS_PER_LONG == 64)
  87	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
  88#elif (BITS_PER_LONG == 32)
  89	use_hashed_handle = hashed_handle;
  90#else
  91#error Unsupported long size. Neither 64 nor 32 bits.
  92#endif
  93
  94	if (!use_hashed_handle) {
  95		int ret;
  96		hash->key = user_token >> PAGE_SHIFT;
  97		ret = drm_ht_insert_item(&dev->map_hash, hash);
  98		if (ret != -EINVAL)
  99			return ret;
 100	}
 101
 102	shift = 0;
 103	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
 104	if (shm && (SHMLBA > PAGE_SIZE)) {
 105		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
 106
 107		/* For shared memory, we have to preserve the SHMLBA
 108		 * bits of the eventual vma->vm_pgoff value during
 109		 * mmap().  Otherwise we run into cache aliasing problems
 110		 * on some platforms.  On these platforms, the pgoff of
 111		 * a mmap() request is used to pick a suitable virtual
 112		 * address for the mmap() region such that it will not
 113		 * cause cache aliasing problems.
 114		 *
 115		 * Therefore, make sure the SHMLBA relevant bits of the
 116		 * hash value we use are equal to those in the original
 117		 * kernel virtual address.
 118		 */
 119		shift = bits;
 120		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
 121	}
 122
 123	return drm_ht_just_insert_please(&dev->map_hash, hash,
 124					 user_token, 32 - PAGE_SHIFT - 3,
 125					 shift, add);
 126}
 127
 128/**
 129 * Core function to create a range of memory available for mapping by a
 130 * non-root process.
 131 *
 132 * Adjusts the memory offset to its absolute value according to the mapping
 133 * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
 134 * applicable and if supported by the kernel.
 135 */
 136static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 137			   unsigned int size, enum drm_map_type type,
 138			   enum drm_map_flags flags,
 139			   struct drm_map_list ** maplist)
 140{
 141	struct drm_local_map *map;
 142	struct drm_map_list *list;
 143	drm_dma_handle_t *dmah;
 144	unsigned long user_token;
 145	int ret;
 146
 147	map = kmalloc(sizeof(*map), GFP_KERNEL);
 148	if (!map)
 149		return -ENOMEM;
 150
 151	map->offset = offset;
 152	map->size = size;
 153	map->flags = flags;
 154	map->type = type;
 155
 156	/* Only allow shared memory to be removable since we only keep enough
 157	 * book keeping information about shared memory to allow for removal
 158	 * when processes fork.
 159	 */
 160	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
 161		kfree(map);
 162		return -EINVAL;
 163	}
 164	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
 165		  (unsigned long long)map->offset, map->size, map->type);
 166
 167	/* page-align _DRM_SHM maps. They are allocated here so there is no security
 168	 * hole created by that and it works around various broken drivers that use
 169	 * a non-aligned quantity to map the SAREA. --BenH
 170	 */
 171	if (map->type == _DRM_SHM)
 172		map->size = PAGE_ALIGN(map->size);
 173
 174	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
 175		kfree(map);
 176		return -EINVAL;
 177	}
 178	map->mtrr = -1;
 179	map->handle = NULL;
 180
 181	switch (map->type) {
 182	case _DRM_REGISTERS:
 183	case _DRM_FRAME_BUFFER:
 184#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
 185		if (map->offset + (map->size-1) < map->offset ||
 186		    map->offset < virt_to_phys(high_memory)) {
 187			kfree(map);
 188			return -EINVAL;
 189		}
 190#endif
 191		/* Some drivers preinitialize some maps, without the X Server
 192		 * needing to be aware of it.  Therefore, we just return success
 193		 * when the server tries to create a duplicate map.
 194		 */
 195		list = drm_find_matching_map(dev, map);
 196		if (list != NULL) {
 197			if (list->map->size != map->size) {
 198				DRM_DEBUG("Matching maps of type %d with "
 199					  "mismatched sizes, (%ld vs %ld)\n",
 200					  map->type, map->size,
 201					  list->map->size);
 202				list->map->size = map->size;
 203			}
 204
 205			kfree(map);
 206			*maplist = list;
 207			return 0;
 208		}
 209
 210		if (drm_core_has_MTRR(dev)) {
 211			if (map->type == _DRM_FRAME_BUFFER ||
 212			    (map->flags & _DRM_WRITE_COMBINING)) {
 213				map->mtrr = mtrr_add(map->offset, map->size,
 214						     MTRR_TYPE_WRCOMB, 1);
 215			}
 216		}
 217		if (map->type == _DRM_REGISTERS) {
 218			map->handle = ioremap(map->offset, map->size);
 
 
 
 
 219			if (!map->handle) {
 220				kfree(map);
 221				return -ENOMEM;
 222			}
 223		}
 224
 225		break;
 226	case _DRM_SHM:
 227		list = drm_find_matching_map(dev, map);
 228		if (list != NULL) {
 229			if(list->map->size != map->size) {
 230				DRM_DEBUG("Matching maps of type %d with "
 231					  "mismatched sizes, (%ld vs %ld)\n",
 232					  map->type, map->size, list->map->size);
 233				list->map->size = map->size;
 234			}
 235
 236			kfree(map);
 237			*maplist = list;
 238			return 0;
 239		}
 240		map->handle = vmalloc_user(map->size);
 241		DRM_DEBUG("%lu %d %p\n",
 242			  map->size, drm_order(map->size), map->handle);
 243		if (!map->handle) {
 244			kfree(map);
 245			return -ENOMEM;
 246		}
 247		map->offset = (unsigned long)map->handle;
 248		if (map->flags & _DRM_CONTAINS_LOCK) {
 249			/* Prevent a 2nd X Server from creating a 2nd lock */
 250			if (dev->primary->master->lock.hw_lock != NULL) {
 251				vfree(map->handle);
 252				kfree(map);
 253				return -EBUSY;
 254			}
 255			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
 256		}
 257		break;
 258	case _DRM_AGP: {
 259		struct drm_agp_mem *entry;
 260		int valid = 0;
 261
 262		if (!drm_core_has_AGP(dev)) {
 263			kfree(map);
 264			return -EINVAL;
 265		}
 266#ifdef __alpha__
 267		map->offset += dev->hose->mem_space->start;
 268#endif
 269		/* In some cases (i810 driver), user space may have already
 270		 * added the AGP base itself, because dev->agp->base previously
 271		 * only got set during AGP enable.  So, only add the base
 272		 * address if the map's offset isn't already within the
 273		 * aperture.
 274		 */
 275		if (map->offset < dev->agp->base ||
 276		    map->offset > dev->agp->base +
 277		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
 278			map->offset += dev->agp->base;
 279		}
 280		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
 281
 282		/* This assumes the DRM is in total control of AGP space.
 283		 * It's not always the case as AGP can be in the control
 284		 * of user space (i.e. i810 driver). So this loop will get
 285		 * skipped and we double check that dev->agp->memory is
 286		 * actually set as well as being invalid before EPERM'ing
 287		 */
 288		list_for_each_entry(entry, &dev->agp->memory, head) {
 289			if ((map->offset >= entry->bound) &&
 290			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
 291				valid = 1;
 292				break;
 293			}
 294		}
 295		if (!list_empty(&dev->agp->memory) && !valid) {
 296			kfree(map);
 297			return -EPERM;
 298		}
 299		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
 300			  (unsigned long long)map->offset, map->size);
 301
 302		break;
 303	}
 304	case _DRM_GEM:
 305		DRM_ERROR("tried to addmap GEM object\n");
 306		break;
 307	case _DRM_SCATTER_GATHER:
 308		if (!dev->sg) {
 309			kfree(map);
 310			return -EINVAL;
 311		}
 312		map->offset += (unsigned long)dev->sg->virtual;
 313		break;
 314	case _DRM_CONSISTENT:
 315		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
 316		 * As we're limiting the address to 2^32-1 (or less),
 317		 * casting it down to 32 bits is no problem, but we
 318		 * need to point to a 64-bit variable first. */
 319		dmah = drm_pci_alloc(dev, map->size, map->size);
 320		if (!dmah) {
 321			kfree(map);
 322			return -ENOMEM;
 323		}
 324		map->handle = dmah->vaddr;
 325		map->offset = (unsigned long)dmah->busaddr;
 326		kfree(dmah);
 327		break;
 328	default:
 329		kfree(map);
 330		return -EINVAL;
 331	}
 332
 333	list = kzalloc(sizeof(*list), GFP_KERNEL);
 334	if (!list) {
 335		if (map->type == _DRM_REGISTERS)
 336			iounmap(map->handle);
 337		kfree(map);
 338		return -ENOMEM;
 339	}
 340	list->map = map;
 341
 342	mutex_lock(&dev->struct_mutex);
 343	list_add(&list->head, &dev->maplist);
 344
 345	/* Assign a 32-bit handle */
 346	/* We do it here so that dev->struct_mutex protects the increment */
 347	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
 348		map->offset;
 349	ret = drm_map_handle(dev, &list->hash, user_token, 0,
 350			     (map->type == _DRM_SHM));
 351	if (ret) {
 352		if (map->type == _DRM_REGISTERS)
 353			iounmap(map->handle);
 354		kfree(map);
 355		kfree(list);
 356		mutex_unlock(&dev->struct_mutex);
 357		return ret;
 358	}
 359
 360	list->user_token = list->hash.key << PAGE_SHIFT;
 361	mutex_unlock(&dev->struct_mutex);
 362
 363	if (!(map->flags & _DRM_DRIVER))
 364		list->master = dev->primary->master;
 365	*maplist = list;
 366	return 0;
 367}
 368
 369int drm_addmap(struct drm_device *dev, resource_size_t offset,
 370	       unsigned int size, enum drm_map_type type,
 371	       enum drm_map_flags flags, struct drm_local_map **map_ptr)
 372{
 373	struct drm_map_list *list;
 374	int rc;
 375
 376	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
 377	if (!rc)
 378		*map_ptr = list->map;
 379	return rc;
 380}
 381
 382EXPORT_SYMBOL(drm_addmap);
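
/*
 * Example (an illustrative sketch, not part of the original file): a legacy
 * driver would typically expose its register BAR with drm_addmap() from its
 * load hook, roughly like this ("regs" is a hypothetical driver-private
 * pointer):
 *
 *	struct drm_local_map *regs;
 *	int ret;
 *
 *	ret = drm_addmap(dev, pci_resource_start(dev->pdev, 0),
 *			 pci_resource_len(dev->pdev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (ret)
 *		return ret;
 */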
 383
 384/**
 385 * Ioctl to specify a range of memory that is available for mapping by a
 386 * non-root process.
 387 *
 388 * \param inode device inode.
 389 * \param file_priv DRM file private.
 390 * \param cmd command.
 391 * \param arg pointer to a drm_map structure.
 392 * \return zero on success or a negative value on error.
 393 *
 394 */
 395int drm_addmap_ioctl(struct drm_device *dev, void *data,
 396		     struct drm_file *file_priv)
 397{
 398	struct drm_map *map = data;
 399	struct drm_map_list *maplist;
 400	int err;
 401
 402	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
 403		return -EPERM;
 404
 405	err = drm_addmap_core(dev, map->offset, map->size, map->type,
 406			      map->flags, &maplist);
 407
 408	if (err)
 409		return err;
 410
 411	/* avoid a warning on 64-bit; the cast isn't pretty, but the ABI is fixed, so it's too late to change it */
 412	map->handle = (void *)(unsigned long)maplist->user_token;
 413	return 0;
 414}
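
/*
 * Userspace reaches this ioctl through libdrm's drmAddMap() wrapper; a
 * sketch of typical use (error handling trimmed):
 *
 *	drm_handle_t handle;
 *
 *	ret = drmAddMap(fd, 0, 4096, DRM_SHM, DRM_CONTAINS_LOCK, &handle);
 *
 * On return, "handle" holds the 32-bit user_token assigned above, and is
 * what the client later passes to mmap() as the offset.
 */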
 415
 416/**
 417 * Remove a map private from the list and deallocate resources if the
 418 * mapping isn't in use.
 419 *
 420 * Searches for the map on drm_device::maplist, removes it from the list,
 421 * checks whether it is still in use, and frees any associated resources
 422 * (such as MTRRs) if it is not.
 423 *
 424 * \sa drm_addmap
 425 */
 426int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 427{
 428	struct drm_map_list *r_list = NULL, *list_t;
 429	drm_dma_handle_t dmah;
 430	int found = 0;
 431	struct drm_master *master;
 432
 433	/* Find the list entry for the map and remove it */
 434	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
 435		if (r_list->map == map) {
 436			master = r_list->master;
 437			list_del(&r_list->head);
 438			drm_ht_remove_key(&dev->map_hash,
 439					  r_list->user_token >> PAGE_SHIFT);
 440			kfree(r_list);
 441			found = 1;
 442			break;
 443		}
 444	}
 445
 446	if (!found)
 447		return -EINVAL;
 448
 449	switch (map->type) {
 450	case _DRM_REGISTERS:
 451		iounmap(map->handle);
 452		/* FALLTHROUGH */
 453	case _DRM_FRAME_BUFFER:
 454		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
 455			int retcode;
 456			retcode = mtrr_del(map->mtrr, map->offset, map->size);
 457			DRM_DEBUG("mtrr_del=%d\n", retcode);
 458		}
 459		break;
 460	case _DRM_SHM:
 461		vfree(map->handle);
 462		if (master) {
 463			if (dev->sigdata.lock == master->lock.hw_lock)
 464				dev->sigdata.lock = NULL;
 465			master->lock.hw_lock = NULL;   /* SHM removed */
 466			master->lock.file_priv = NULL;
 467			wake_up_interruptible_all(&master->lock.lock_queue);
 468		}
 469		break;
 470	case _DRM_AGP:
 471	case _DRM_SCATTER_GATHER:
 472		break;
 473	case _DRM_CONSISTENT:
 474		dmah.vaddr = map->handle;
 475		dmah.busaddr = map->offset;
 476		dmah.size = map->size;
 477		__drm_pci_free(dev, &dmah);
 478		break;
 479	case _DRM_GEM:
 480		DRM_ERROR("tried to rmmap GEM object\n");
 481		break;
 482	}
 483	kfree(map);
 484
 485	return 0;
 486}
 487EXPORT_SYMBOL(drm_rmmap_locked);
 488
 489int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
 490{
 491	int ret;
 492
 493	mutex_lock(&dev->struct_mutex);
 494	ret = drm_rmmap_locked(dev, map);
 495	mutex_unlock(&dev->struct_mutex);
 496
 497	return ret;
 498}
 499EXPORT_SYMBOL(drm_rmmap);
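
/*
 * Sketch of the matching teardown: a driver that created a map during load
 * (as in the drm_addmap() example above, with the hypothetical "regs"
 * pointer) drops it again on unload:
 *
 *	if (regs) {
 *		drm_rmmap(dev, regs);
 *		regs = NULL;
 *	}
 */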
 500
 501/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 502 * the last close of the device, and this is necessary for cleanup when things
 503 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 504 * like a pointless exercise since they're going away anyway.
 505 *
 506 * One use case might be after addmap is allowed for normal users for SHM and
 507 * gets used by drivers that the server doesn't need to care about.  This seems
 508 * unlikely.
 509 *
 510 * \param inode device inode.
 511 * \param file_priv DRM file private.
 512 * \param cmd command.
 513 * \param arg pointer to a struct drm_map structure.
 514 * \return zero on success or a negative value on error.
 515 */
 516int drm_rmmap_ioctl(struct drm_device *dev, void *data,
 517		    struct drm_file *file_priv)
 518{
 519	struct drm_map *request = data;
 520	struct drm_local_map *map = NULL;
 521	struct drm_map_list *r_list;
 522	int ret;
 523
 524	mutex_lock(&dev->struct_mutex);
 525	list_for_each_entry(r_list, &dev->maplist, head) {
 526		if (r_list->map &&
 527		    r_list->user_token == (unsigned long)request->handle &&
 528		    r_list->map->flags & _DRM_REMOVABLE) {
 529			map = r_list->map;
 530			break;
 531		}
 532	}
 533
 534	/* The list has wrapped around to the head pointer, or it's empty and
 535	 * we didn't find anything.
 536	 */
 537	if (list_empty(&dev->maplist) || !map) {
 538		mutex_unlock(&dev->struct_mutex);
 539		return -EINVAL;
 540	}
 541
 542	/* Register and framebuffer maps are permanent */
 543	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
 544		mutex_unlock(&dev->struct_mutex);
 545		return 0;
 546	}
 547
 548	ret = drm_rmmap_locked(dev, map);
 549
 550	mutex_unlock(&dev->struct_mutex);
 551
 552	return ret;
 553}
 554
 555/**
 556 * Cleanup after an error on one of the addbufs() functions.
 557 *
 558 * \param dev DRM device.
 559 * \param entry buffer entry where the error occurred.
 560 *
 561 * Frees any pages and buffers associated with the given entry.
 562 */
 563static void drm_cleanup_buf_error(struct drm_device * dev,
 564				  struct drm_buf_entry * entry)
 565{
 566	int i;
 567
 568	if (entry->seg_count) {
 569		for (i = 0; i < entry->seg_count; i++) {
 570			if (entry->seglist[i]) {
 571				drm_pci_free(dev, entry->seglist[i]);
 572			}
 573		}
 574		kfree(entry->seglist);
 575
 576		entry->seg_count = 0;
 577	}
 578
 579	if (entry->buf_count) {
 580		for (i = 0; i < entry->buf_count; i++) {
 581			kfree(entry->buflist[i].dev_private);
 582		}
 583		kfree(entry->buflist);
 584
 585		entry->buf_count = 0;
 586	}
 587}
 588
 589#if __OS_HAS_AGP
 590/**
 591 * Add AGP buffers for DMA transfers.
 592 *
 593 * \param dev struct drm_device to which the buffers are to be added.
 594 * \param request pointer to a struct drm_buf_desc describing the request.
 595 * \return zero on success or a negative number on failure.
 596 *
 597 * After some sanity checks, this creates a drm_buf structure for each
 598 * buffer and reallocates the buffer list of the same size order to
 599 * accommodate the new buffers.
 600 */
 601int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
 602{
 603	struct drm_device_dma *dma = dev->dma;
 604	struct drm_buf_entry *entry;
 605	struct drm_agp_mem *agp_entry;
 606	struct drm_buf *buf;
 607	unsigned long offset;
 608	unsigned long agp_offset;
 609	int count;
 610	int order;
 611	int size;
 612	int alignment;
 613	int page_order;
 614	int total;
 615	int byte_count;
 616	int i, valid;
 617	struct drm_buf **temp_buflist;
 618
 619	if (!dma)
 620		return -EINVAL;
 621
 622	count = request->count;
 623	order = drm_order(request->size);
 624	size = 1 << order;
 625
 626	alignment = (request->flags & _DRM_PAGE_ALIGN)
 627	    ? PAGE_ALIGN(size) : size;
 628	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 629	total = PAGE_SIZE << page_order;
 630
 631	byte_count = 0;
 632	agp_offset = dev->agp->base + request->agp_start;
 633
 634	DRM_DEBUG("count:      %d\n", count);
 635	DRM_DEBUG("order:      %d\n", order);
 636	DRM_DEBUG("size:       %d\n", size);
 637	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
 638	DRM_DEBUG("alignment:  %d\n", alignment);
 639	DRM_DEBUG("page_order: %d\n", page_order);
 640	DRM_DEBUG("total:      %d\n", total);
 641
 642	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 643		return -EINVAL;
 644	if (dev->queue_count)
 645		return -EBUSY;	/* Not while in use */
 646
 647	/* Make sure buffers are located in AGP memory that we own */
 648	valid = 0;
 649	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
 650		if ((agp_offset >= agp_entry->bound) &&
 651		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
 652			valid = 1;
 653			break;
 654		}
 655	}
 656	if (!list_empty(&dev->agp->memory) && !valid) {
 657		DRM_DEBUG("zone invalid\n");
 658		return -EINVAL;
 659	}
 660	spin_lock(&dev->count_lock);
 661	if (dev->buf_use) {
 662		spin_unlock(&dev->count_lock);
 663		return -EBUSY;
 664	}
 665	atomic_inc(&dev->buf_alloc);
 666	spin_unlock(&dev->count_lock);
 667
 668	mutex_lock(&dev->struct_mutex);
 669	entry = &dma->bufs[order];
 670	if (entry->buf_count) {
 671		mutex_unlock(&dev->struct_mutex);
 672		atomic_dec(&dev->buf_alloc);
 673		return -ENOMEM;	/* May only call once for each order */
 674	}
 675
 676	if (count < 0 || count > 4096) {
 677		mutex_unlock(&dev->struct_mutex);
 678		atomic_dec(&dev->buf_alloc);
 679		return -EINVAL;
 680	}
 681
 682	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
 683	if (!entry->buflist) {
 684		mutex_unlock(&dev->struct_mutex);
 685		atomic_dec(&dev->buf_alloc);
 686		return -ENOMEM;
 687	}
 688
 689	entry->buf_size = size;
 690	entry->page_order = page_order;
 691
 692	offset = 0;
 693
 694	while (entry->buf_count < count) {
 695		buf = &entry->buflist[entry->buf_count];
 696		buf->idx = dma->buf_count + entry->buf_count;
 697		buf->total = alignment;
 698		buf->order = order;
 699		buf->used = 0;
 700
 701		buf->offset = (dma->byte_count + offset);
 702		buf->bus_address = agp_offset + offset;
 703		buf->address = (void *)(agp_offset + offset);
 704		buf->next = NULL;
 705		buf->waiting = 0;
 706		buf->pending = 0;
 707		init_waitqueue_head(&buf->dma_wait);
 708		buf->file_priv = NULL;
 709
 710		buf->dev_priv_size = dev->driver->dev_priv_size;
 711		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
 712		if (!buf->dev_private) {
 713			/* Set count correctly so we free the proper amount. */
 714			entry->buf_count = count;
 715			drm_cleanup_buf_error(dev, entry);
 716			mutex_unlock(&dev->struct_mutex);
 717			atomic_dec(&dev->buf_alloc);
 718			return -ENOMEM;
 719		}
 720
 721		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
 722
 723		offset += alignment;
 724		entry->buf_count++;
 725		byte_count += PAGE_SIZE << page_order;
 726	}
 727
 728	DRM_DEBUG("byte_count: %d\n", byte_count);
 729
 730	temp_buflist = krealloc(dma->buflist,
 731				(dma->buf_count + entry->buf_count) *
 732				sizeof(*dma->buflist), GFP_KERNEL);
 733	if (!temp_buflist) {
 734		/* Free the entry because it isn't valid */
 735		drm_cleanup_buf_error(dev, entry);
 736		mutex_unlock(&dev->struct_mutex);
 737		atomic_dec(&dev->buf_alloc);
 738		return -ENOMEM;
 739	}
 740	dma->buflist = temp_buflist;
 741
 742	for (i = 0; i < entry->buf_count; i++) {
 743		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 744	}
 745
 746	dma->buf_count += entry->buf_count;
 747	dma->seg_count += entry->seg_count;
 748	dma->page_count += byte_count >> PAGE_SHIFT;
 749	dma->byte_count += byte_count;
 750
 751	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 752	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 753
 754	mutex_unlock(&dev->struct_mutex);
 755
 756	request->count = entry->buf_count;
 757	request->size = size;
 758
 759	dma->flags = _DRM_DMA_USE_AGP;
 760
 761	atomic_dec(&dev->buf_alloc);
 762	return 0;
 763}
 764EXPORT_SYMBOL(drm_addbufs_agp);
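
/*
 * Worked example of the sizing arithmetic above (the PCI, SG and FB
 * variants below repeat it), assuming PAGE_SHIFT == 12 and a requested
 * buffer size of 196608 bytes (192 KiB):
 *
 *	order      = drm_order(196608)       = 18
 *	size       = 1 << order              = 262144 (256 KiB)
 *	page_order = order - PAGE_SHIFT      = 6
 *	total      = PAGE_SIZE << page_order = 262144
 *
 * i.e. every buffer is rounded up to the next power of two, and each
 * buffer spans 2^page_order pages.
 */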
 765#endif				/* __OS_HAS_AGP */
 766
 767int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
 768{
 769	struct drm_device_dma *dma = dev->dma;
 770	int count;
 771	int order;
 772	int size;
 773	int total;
 774	int page_order;
 775	struct drm_buf_entry *entry;
 776	drm_dma_handle_t *dmah;
 777	struct drm_buf *buf;
 778	int alignment;
 779	unsigned long offset;
 780	int i;
 781	int byte_count;
 782	int page_count;
 783	unsigned long *temp_pagelist;
 784	struct drm_buf **temp_buflist;
 785
 786	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
 787		return -EINVAL;
 788
 789	if (!dma)
 790		return -EINVAL;
 791
 792	if (!capable(CAP_SYS_ADMIN))
 793		return -EPERM;
 794
 795	count = request->count;
 796	order = drm_order(request->size);
 797	size = 1 << order;
 798
 799	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
 800		  request->count, request->size, size, order, dev->queue_count);
 801
 802	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 803		return -EINVAL;
 804	if (dev->queue_count)
 805		return -EBUSY;	/* Not while in use */
 806
 807	alignment = (request->flags & _DRM_PAGE_ALIGN)
 808	    ? PAGE_ALIGN(size) : size;
 809	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 810	total = PAGE_SIZE << page_order;
 811
 812	spin_lock(&dev->count_lock);
 813	if (dev->buf_use) {
 814		spin_unlock(&dev->count_lock);
 815		return -EBUSY;
 816	}
 817	atomic_inc(&dev->buf_alloc);
 818	spin_unlock(&dev->count_lock);
 819
 820	mutex_lock(&dev->struct_mutex);
 821	entry = &dma->bufs[order];
 822	if (entry->buf_count) {
 823		mutex_unlock(&dev->struct_mutex);
 824		atomic_dec(&dev->buf_alloc);
 825		return -ENOMEM;	/* May only call once for each order */
 826	}
 827
 828	if (count < 0 || count > 4096) {
 829		mutex_unlock(&dev->struct_mutex);
 830		atomic_dec(&dev->buf_alloc);
 831		return -EINVAL;
 832	}
 833
 834	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
 835	if (!entry->buflist) {
 836		mutex_unlock(&dev->struct_mutex);
 837		atomic_dec(&dev->buf_alloc);
 838		return -ENOMEM;
 839	}
 840
 841	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
 842	if (!entry->seglist) {
 843		kfree(entry->buflist);
 844		mutex_unlock(&dev->struct_mutex);
 845		atomic_dec(&dev->buf_alloc);
 846		return -ENOMEM;
 847	}
 848
 849	/* Keep the original pagelist until we know all the allocations
 850	 * have succeeded
 851	 */
 852	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
 853			       sizeof(*dma->pagelist), GFP_KERNEL);
 854	if (!temp_pagelist) {
 855		kfree(entry->buflist);
 856		kfree(entry->seglist);
 857		mutex_unlock(&dev->struct_mutex);
 858		atomic_dec(&dev->buf_alloc);
 859		return -ENOMEM;
 860	}
 861	memcpy(temp_pagelist,
 862	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
 863	DRM_DEBUG("pagelist: %d entries\n",
 864		  dma->page_count + (count << page_order));
 865
 866	entry->buf_size = size;
 867	entry->page_order = page_order;
 868	byte_count = 0;
 869	page_count = 0;
 870
 871	while (entry->buf_count < count) {
 872
 873		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
 874
 875		if (!dmah) {
 876			/* Set count correctly so we free the proper amount. */
 877			entry->buf_count = count;
 878			entry->seg_count = count;
 879			drm_cleanup_buf_error(dev, entry);
 880			kfree(temp_pagelist);
 881			mutex_unlock(&dev->struct_mutex);
 882			atomic_dec(&dev->buf_alloc);
 883			return -ENOMEM;
 884		}
 885		entry->seglist[entry->seg_count++] = dmah;
 886		for (i = 0; i < (1 << page_order); i++) {
 887			DRM_DEBUG("page %d @ 0x%08lx\n",
 888				  dma->page_count + page_count,
 889				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
 890			temp_pagelist[dma->page_count + page_count++]
 891				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
 892		}
 893		for (offset = 0;
 894		     offset + size <= total && entry->buf_count < count;
 895		     offset += alignment, ++entry->buf_count) {
 896			buf = &entry->buflist[entry->buf_count];
 897			buf->idx = dma->buf_count + entry->buf_count;
 898			buf->total = alignment;
 899			buf->order = order;
 900			buf->used = 0;
 901			buf->offset = (dma->byte_count + byte_count + offset);
 902			buf->address = (void *)(dmah->vaddr + offset);
 903			buf->bus_address = dmah->busaddr + offset;
 904			buf->next = NULL;
 905			buf->waiting = 0;
 906			buf->pending = 0;
 907			init_waitqueue_head(&buf->dma_wait);
 908			buf->file_priv = NULL;
 909
 910			buf->dev_priv_size = dev->driver->dev_priv_size;
 911			buf->dev_private = kzalloc(buf->dev_priv_size,
 912						GFP_KERNEL);
 913			if (!buf->dev_private) {
 914				/* Set count correctly so we free the proper amount. */
 915				entry->buf_count = count;
 916				entry->seg_count = count;
 917				drm_cleanup_buf_error(dev, entry);
 918				kfree(temp_pagelist);
 919				mutex_unlock(&dev->struct_mutex);
 920				atomic_dec(&dev->buf_alloc);
 921				return -ENOMEM;
 922			}
 923
 924			DRM_DEBUG("buffer %d @ %p\n",
 925				  entry->buf_count, buf->address);
 926		}
 927		byte_count += PAGE_SIZE << page_order;
 928	}
 929
 930	temp_buflist = krealloc(dma->buflist,
 931				(dma->buf_count + entry->buf_count) *
 932				sizeof(*dma->buflist), GFP_KERNEL);
 933	if (!temp_buflist) {
 934		/* Free the entry because it isn't valid */
 935		drm_cleanup_buf_error(dev, entry);
 936		kfree(temp_pagelist);
 937		mutex_unlock(&dev->struct_mutex);
 938		atomic_dec(&dev->buf_alloc);
 939		return -ENOMEM;
 940	}
 941	dma->buflist = temp_buflist;
 942
 943	for (i = 0; i < entry->buf_count; i++) {
 944		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 945	}
 946
 947	/* No allocations failed, so now we can replace the original pagelist
 948	 * with the new one.
 949	 */
 950	if (dma->page_count) {
 951		kfree(dma->pagelist);
 952	}
 953	dma->pagelist = temp_pagelist;
 954
 955	dma->buf_count += entry->buf_count;
 956	dma->seg_count += entry->seg_count;
 957	dma->page_count += entry->seg_count << page_order;
 958	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
 959
 960	mutex_unlock(&dev->struct_mutex);
 961
 962	request->count = entry->buf_count;
 963	request->size = size;
 964
 965	if (request->flags & _DRM_PCI_BUFFER_RO)
 966		dma->flags = _DRM_DMA_USE_PCI_RO;
 967
 968	atomic_dec(&dev->buf_alloc);
 969	return 0;
 970
 971}
 972EXPORT_SYMBOL(drm_addbufs_pci);
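
/*
 * A minimal sketch of how a driver might ask for PCI-consistent buffers;
 * the values are illustrative, a real driver derives them from its
 * hardware's DMA requirements:
 *
 *	struct drm_buf_desc request = {
 *		.count = 32,
 *		.size  = 16384,
 *		.flags = _DRM_PAGE_ALIGN,
 *	};
 *
 *	ret = drm_addbufs_pci(dev, &request);
 *
 * On success, request.count and request.size report what was actually
 * allocated for the rounded-up size order.
 */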
 973
 974static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
 975{
 976	struct drm_device_dma *dma = dev->dma;
 977	struct drm_buf_entry *entry;
 978	struct drm_buf *buf;
 979	unsigned long offset;
 980	unsigned long agp_offset;
 981	int count;
 982	int order;
 983	int size;
 984	int alignment;
 985	int page_order;
 986	int total;
 987	int byte_count;
 988	int i;
 989	struct drm_buf **temp_buflist;
 990
 991	if (!drm_core_check_feature(dev, DRIVER_SG))
 992		return -EINVAL;
 993
 994	if (!dma)
 995		return -EINVAL;
 996
 997	if (!capable(CAP_SYS_ADMIN))
 998		return -EPERM;
 999
1000	count = request->count;
1001	order = drm_order(request->size);
1002	size = 1 << order;
1003
1004	alignment = (request->flags & _DRM_PAGE_ALIGN)
1005	    ? PAGE_ALIGN(size) : size;
1006	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1007	total = PAGE_SIZE << page_order;
1008
1009	byte_count = 0;
1010	agp_offset = request->agp_start;
1011
1012	DRM_DEBUG("count:      %d\n", count);
1013	DRM_DEBUG("order:      %d\n", order);
1014	DRM_DEBUG("size:       %d\n", size);
1015	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1016	DRM_DEBUG("alignment:  %d\n", alignment);
1017	DRM_DEBUG("page_order: %d\n", page_order);
1018	DRM_DEBUG("total:      %d\n", total);
1019
1020	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1021		return -EINVAL;
1022	if (dev->queue_count)
1023		return -EBUSY;	/* Not while in use */
1024
1025	spin_lock(&dev->count_lock);
1026	if (dev->buf_use) {
1027		spin_unlock(&dev->count_lock);
1028		return -EBUSY;
1029	}
1030	atomic_inc(&dev->buf_alloc);
1031	spin_unlock(&dev->count_lock);
1032
1033	mutex_lock(&dev->struct_mutex);
1034	entry = &dma->bufs[order];
1035	if (entry->buf_count) {
1036		mutex_unlock(&dev->struct_mutex);
1037		atomic_dec(&dev->buf_alloc);
1038		return -ENOMEM;	/* May only call once for each order */
1039	}
1040
1041	if (count < 0 || count > 4096) {
1042		mutex_unlock(&dev->struct_mutex);
1043		atomic_dec(&dev->buf_alloc);
1044		return -EINVAL;
1045	}
1046
1047	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1048				GFP_KERNEL);
1049	if (!entry->buflist) {
1050		mutex_unlock(&dev->struct_mutex);
1051		atomic_dec(&dev->buf_alloc);
1052		return -ENOMEM;
1053	}
1054
1055	entry->buf_size = size;
1056	entry->page_order = page_order;
1057
1058	offset = 0;
1059
1060	while (entry->buf_count < count) {
1061		buf = &entry->buflist[entry->buf_count];
1062		buf->idx = dma->buf_count + entry->buf_count;
1063		buf->total = alignment;
1064		buf->order = order;
1065		buf->used = 0;
1066
1067		buf->offset = (dma->byte_count + offset);
1068		buf->bus_address = agp_offset + offset;
1069		buf->address = (void *)(agp_offset + offset
1070					+ (unsigned long)dev->sg->virtual);
1071		buf->next = NULL;
1072		buf->waiting = 0;
1073		buf->pending = 0;
1074		init_waitqueue_head(&buf->dma_wait);
1075		buf->file_priv = NULL;
1076
1077		buf->dev_priv_size = dev->driver->dev_priv_size;
1078		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1079		if (!buf->dev_private) {
1080			/* Set count correctly so we free the proper amount. */
1081			entry->buf_count = count;
1082			drm_cleanup_buf_error(dev, entry);
1083			mutex_unlock(&dev->struct_mutex);
1084			atomic_dec(&dev->buf_alloc);
1085			return -ENOMEM;
1086		}
1087
1088		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1089
1090		offset += alignment;
1091		entry->buf_count++;
1092		byte_count += PAGE_SIZE << page_order;
1093	}
1094
1095	DRM_DEBUG("byte_count: %d\n", byte_count);
1096
1097	temp_buflist = krealloc(dma->buflist,
1098				(dma->buf_count + entry->buf_count) *
1099				sizeof(*dma->buflist), GFP_KERNEL);
1100	if (!temp_buflist) {
1101		/* Free the entry because it isn't valid */
1102		drm_cleanup_buf_error(dev, entry);
1103		mutex_unlock(&dev->struct_mutex);
1104		atomic_dec(&dev->buf_alloc);
1105		return -ENOMEM;
1106	}
1107	dma->buflist = temp_buflist;
1108
1109	for (i = 0; i < entry->buf_count; i++) {
1110		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1111	}
1112
1113	dma->buf_count += entry->buf_count;
1114	dma->seg_count += entry->seg_count;
1115	dma->page_count += byte_count >> PAGE_SHIFT;
1116	dma->byte_count += byte_count;
1117
1118	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1119	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1120
1121	mutex_unlock(&dev->struct_mutex);
1122
1123	request->count = entry->buf_count;
1124	request->size = size;
1125
1126	dma->flags = _DRM_DMA_USE_SG;
1127
1128	atomic_dec(&dev->buf_alloc);
1129	return 0;
1130}
1131
1132static int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request)
1133{
1134	struct drm_device_dma *dma = dev->dma;
1135	struct drm_buf_entry *entry;
1136	struct drm_buf *buf;
1137	unsigned long offset;
1138	unsigned long agp_offset;
1139	int count;
1140	int order;
1141	int size;
1142	int alignment;
1143	int page_order;
1144	int total;
1145	int byte_count;
1146	int i;
1147	struct drm_buf **temp_buflist;
1148
1149	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1150		return -EINVAL;
1151
1152	if (!dma)
1153		return -EINVAL;
1154
1155	if (!capable(CAP_SYS_ADMIN))
1156		return -EPERM;
1157
1158	count = request->count;
1159	order = drm_order(request->size);
1160	size = 1 << order;
1161
1162	alignment = (request->flags & _DRM_PAGE_ALIGN)
1163	    ? PAGE_ALIGN(size) : size;
1164	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1165	total = PAGE_SIZE << page_order;
1166
1167	byte_count = 0;
1168	agp_offset = request->agp_start;
1169
1170	DRM_DEBUG("count:      %d\n", count);
1171	DRM_DEBUG("order:      %d\n", order);
1172	DRM_DEBUG("size:       %d\n", size);
1173	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1174	DRM_DEBUG("alignment:  %d\n", alignment);
1175	DRM_DEBUG("page_order: %d\n", page_order);
1176	DRM_DEBUG("total:      %d\n", total);
1177
1178	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1179		return -EINVAL;
1180	if (dev->queue_count)
1181		return -EBUSY;	/* Not while in use */
1182
1183	spin_lock(&dev->count_lock);
1184	if (dev->buf_use) {
1185		spin_unlock(&dev->count_lock);
1186		return -EBUSY;
1187	}
1188	atomic_inc(&dev->buf_alloc);
1189	spin_unlock(&dev->count_lock);
1190
1191	mutex_lock(&dev->struct_mutex);
1192	entry = &dma->bufs[order];
1193	if (entry->buf_count) {
1194		mutex_unlock(&dev->struct_mutex);
1195		atomic_dec(&dev->buf_alloc);
1196		return -ENOMEM;	/* May only call once for each order */
1197	}
1198
1199	if (count < 0 || count > 4096) {
1200		mutex_unlock(&dev->struct_mutex);
1201		atomic_dec(&dev->buf_alloc);
1202		return -EINVAL;
1203	}
1204
1205	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1206				GFP_KERNEL);
1207	if (!entry->buflist) {
1208		mutex_unlock(&dev->struct_mutex);
1209		atomic_dec(&dev->buf_alloc);
1210		return -ENOMEM;
1211	}
1212
1213	entry->buf_size = size;
1214	entry->page_order = page_order;
1215
1216	offset = 0;
1217
1218	while (entry->buf_count < count) {
1219		buf = &entry->buflist[entry->buf_count];
1220		buf->idx = dma->buf_count + entry->buf_count;
1221		buf->total = alignment;
1222		buf->order = order;
1223		buf->used = 0;
1224
1225		buf->offset = (dma->byte_count + offset);
1226		buf->bus_address = agp_offset + offset;
1227		buf->address = (void *)(agp_offset + offset);
1228		buf->next = NULL;
1229		buf->waiting = 0;
1230		buf->pending = 0;
1231		init_waitqueue_head(&buf->dma_wait);
1232		buf->file_priv = NULL;
1233
1234		buf->dev_priv_size = dev->driver->dev_priv_size;
1235		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1236		if (!buf->dev_private) {
1237			/* Set count correctly so we free the proper amount. */
1238			entry->buf_count = count;
1239			drm_cleanup_buf_error(dev, entry);
1240			mutex_unlock(&dev->struct_mutex);
1241			atomic_dec(&dev->buf_alloc);
1242			return -ENOMEM;
1243		}
1244
1245		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1246
1247		offset += alignment;
1248		entry->buf_count++;
1249		byte_count += PAGE_SIZE << page_order;
1250	}
1251
1252	DRM_DEBUG("byte_count: %d\n", byte_count);
1253
1254	temp_buflist = krealloc(dma->buflist,
1255				(dma->buf_count + entry->buf_count) *
1256				sizeof(*dma->buflist), GFP_KERNEL);
1257	if (!temp_buflist) {
1258		/* Free the entry because it isn't valid */
1259		drm_cleanup_buf_error(dev, entry);
1260		mutex_unlock(&dev->struct_mutex);
1261		atomic_dec(&dev->buf_alloc);
1262		return -ENOMEM;
1263	}
1264	dma->buflist = temp_buflist;
1265
1266	for (i = 0; i < entry->buf_count; i++) {
1267		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1268	}
1269
1270	dma->buf_count += entry->buf_count;
1271	dma->seg_count += entry->seg_count;
1272	dma->page_count += byte_count >> PAGE_SHIFT;
1273	dma->byte_count += byte_count;
1274
1275	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1276	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1277
1278	mutex_unlock(&dev->struct_mutex);
1279
1280	request->count = entry->buf_count;
1281	request->size = size;
1282
1283	dma->flags = _DRM_DMA_USE_FB;
1284
1285	atomic_dec(&dev->buf_alloc);
1286	return 0;
1287}
1288
1289
1290/**
1291 * Add buffers for DMA transfers (ioctl).
1292 *
1293 * \param inode device inode.
1294 * \param file_priv DRM file private.
1295 * \param cmd command.
1296 * \param arg pointer to a struct drm_buf_desc request.
1297 * \return zero on success or a negative number on failure.
1298 *
1299 * Depending on the memory type specified in drm_buf_desc::flags and the
1300 * build options, it dispatches the call to drm_addbufs_agp(),
1301 * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
1302 * scatter-gather, framebuffer or consistent PCI memory respectively.
1303 */
1304int drm_addbufs(struct drm_device *dev, void *data,
1305		struct drm_file *file_priv)
1306{
1307	struct drm_buf_desc *request = data;
1308	int ret;
1309
1310	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1311		return -EINVAL;
1312
1313#if __OS_HAS_AGP
1314	if (request->flags & _DRM_AGP_BUFFER)
1315		ret = drm_addbufs_agp(dev, request);
1316	else
1317#endif
1318	if (request->flags & _DRM_SG_BUFFER)
1319		ret = drm_addbufs_sg(dev, request);
1320	else if (request->flags & _DRM_FB_BUFFER)
1321		ret = drm_addbufs_fb(dev, request);
1322	else
1323		ret = drm_addbufs_pci(dev, request);
1324
1325	return ret;
1326}
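
/*
 * From userspace the dispatch above is normally reached via libdrm, e.g.
 * (sketch; the flags argument selects which path is taken):
 *
 *	count = drmAddBufs(fd, 32, 16384, DRM_PAGE_ALIGN, 0);
 *	if (count < 0)
 *		return count;
 */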
1327
1328/**
1329 * Get information about the buffer mappings.
1330 *
1331 * This was originally meant for debugging purposes, or for use by a
1332 * sophisticated client library to determine how best to use the available
1333 * buffers (e.g., large buffers can be used for image transfer).
1334 *
1335 * \param inode device inode.
1336 * \param file_priv DRM file private.
1337 * \param cmd command.
1338 * \param arg pointer to a drm_buf_info structure.
1339 * \return zero on success or a negative number on failure.
1340 *
1341 * Increments drm_device::buf_use while holding the drm_device::count_lock
1342 * lock, preventing allocation of more buffers after this call. Information
1343 * about each requested buffer is then copied into user space.
1344 */
1345int drm_infobufs(struct drm_device *dev, void *data,
1346		 struct drm_file *file_priv)
1347{
1348	struct drm_device_dma *dma = dev->dma;
1349	struct drm_buf_info *request = data;
1350	int i;
1351	int count;
1352
1353	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1354		return -EINVAL;
1355
1356	if (!dma)
1357		return -EINVAL;
1358
1359	spin_lock(&dev->count_lock);
1360	if (atomic_read(&dev->buf_alloc)) {
1361		spin_unlock(&dev->count_lock);
1362		return -EBUSY;
1363	}
1364	++dev->buf_use;		/* Can't allocate more after this call */
1365	spin_unlock(&dev->count_lock);
1366
1367	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1368		if (dma->bufs[i].buf_count)
1369			++count;
1370	}
1371
1372	DRM_DEBUG("count = %d\n", count);
1373
1374	if (request->count >= count) {
1375		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1376			if (dma->bufs[i].buf_count) {
1377				struct drm_buf_desc __user *to =
1378				    &request->list[count];
1379				struct drm_buf_entry *from = &dma->bufs[i];
1380				struct drm_freelist *list = &dma->bufs[i].freelist;
1381				if (copy_to_user(&to->count,
1382						 &from->buf_count,
1383						 sizeof(from->buf_count)) ||
1384				    copy_to_user(&to->size,
1385						 &from->buf_size,
1386						 sizeof(from->buf_size)) ||
1387				    copy_to_user(&to->low_mark,
1388						 &list->low_mark,
1389						 sizeof(list->low_mark)) ||
1390				    copy_to_user(&to->high_mark,
1391						 &list->high_mark,
1392						 sizeof(list->high_mark)))
1393					return -EFAULT;
1394
1395				DRM_DEBUG("%d %d %d %d %d\n",
1396					  i,
1397					  dma->bufs[i].buf_count,
1398					  dma->bufs[i].buf_size,
1399					  dma->bufs[i].freelist.low_mark,
1400					  dma->bufs[i].freelist.high_mark);
1401				++count;
1402			}
1403		}
1404	}
1405	request->count = count;
1406
1407	return 0;
1408}
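
/*
 * This ioctl supports the usual two-pass pattern (libdrm's drmGetBufInfo()
 * wraps the same sequence): probe with count 0 to learn how many size
 * orders are populated, then allocate the list and fetch the descriptions.
 * A sketch with the raw structures:
 *
 *	struct drm_buf_info info = { .count = 0, .list = NULL };
 *
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);	(sets info.count)
 *	info.list = calloc(info.count, sizeof(*info.list));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);	(fills info.list)
 */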
1409
1410/**
1411 * Specifies a low and high water mark for buffer allocation.
1412 *
1413 * \param inode device inode.
1414 * \param file_priv DRM file private.
1415 * \param cmd command.
1416 * \param arg a pointer to a drm_buf_desc structure.
1417 * \return zero on success or a negative number on failure.
1418 *
1419 * Verifies that the size order falls between the admissible orders and
1420 * updates the low and high water marks of the respective
1421 * drm_device_dma::bufs entry.
1422 * \note This ioctl is deprecated and rarely, if ever, used.
1423 */
1424int drm_markbufs(struct drm_device *dev, void *data,
1425		 struct drm_file *file_priv)
1426{
1427	struct drm_device_dma *dma = dev->dma;
1428	struct drm_buf_desc *request = data;
1429	int order;
1430	struct drm_buf_entry *entry;
1431
1432	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1433		return -EINVAL;
1434
1435	if (!dma)
1436		return -EINVAL;
1437
1438	DRM_DEBUG("%d, %d, %d\n",
1439		  request->size, request->low_mark, request->high_mark);
1440	order = drm_order(request->size);
1441	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1442		return -EINVAL;
1443	entry = &dma->bufs[order];
1444
1445	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1446		return -EINVAL;
1447	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1448		return -EINVAL;
1449
1450	entry->freelist.low_mark = request->low_mark;
1451	entry->freelist.high_mark = request->high_mark;
1452
1453	return 0;
1454}
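
/*
 * Sketch of setting the water marks from userspace with the raw ioctl
 * (libdrm's drmMarkBufs() expresses the same thing as fractions of the
 * buffer count); the size field selects which drm_device_dma::bufs entry
 * is updated:
 *
 *	struct drm_buf_desc desc = {
 *		.size      = 16384,
 *		.low_mark  = 4,
 *		.high_mark = 24,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &desc);
 */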
1455
1456/**
1457 * Unreserve the buffers in list, previously reserved using drmDMA.
1458 *
1459 * \param inode device inode.
1460 * \param file_priv DRM file private.
1461 * \param cmd command.
1462 * \param arg pointer to a drm_buf_free structure.
1463 * \return zero on success or a negative number on failure.
1464 *
1465 * Calls drm_free_buffer() for each used buffer.
1466 * This function is primarily used for debugging.
1467 */
1468int drm_freebufs(struct drm_device *dev, void *data,
1469		 struct drm_file *file_priv)
1470{
1471	struct drm_device_dma *dma = dev->dma;
1472	struct drm_buf_free *request = data;
1473	int i;
1474	int idx;
1475	struct drm_buf *buf;
1476
1477	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1478		return -EINVAL;
1479
1480	if (!dma)
1481		return -EINVAL;
1482
1483	DRM_DEBUG("%d\n", request->count);
1484	for (i = 0; i < request->count; i++) {
1485		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1486			return -EFAULT;
1487		if (idx < 0 || idx >= dma->buf_count) {
1488			DRM_ERROR("Index %d (of %d max)\n",
1489				  idx, dma->buf_count - 1);
1490			return -EINVAL;
1491		}
1492		buf = dma->buflist[idx];
1493		if (buf->file_priv != file_priv) {
1494			DRM_ERROR("Process %d freeing buffer not owned\n",
1495				  task_pid_nr(current));
1496			return -EINVAL;
1497		}
1498		drm_free_buffer(dev, buf);
1499	}
1500
1501	return 0;
1502}
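
/*
 * Corresponding userspace sketch: hand back a set of buffer indexes with
 * DRM_IOCTL_FREE_BUFS (wrapped by libdrm as drmFreeBufs()):
 *
 *	int idx[2] = { 3, 7 };
 *	struct drm_buf_free req = { .count = 2, .list = idx };
 *
 *	ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 */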
1503
1504/**
1505 * Maps all of the DMA buffers into client-virtual space (ioctl).
1506 *
1507 * \param inode device inode.
1508 * \param file_priv DRM file private.
1509 * \param cmd command.
1510 * \param arg pointer to a drm_buf_map structure.
1511 * \return zero on success or a negative number on failure.
1512 *
1513 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1514 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1515 * offset equal to 0, which drm_mmap() interprets as a request for PCI
1516 * buffers and calls drm_mmap_dma().
1517 */
1518int drm_mapbufs(struct drm_device *dev, void *data,
1519	        struct drm_file *file_priv)
1520{
1521	struct drm_device_dma *dma = dev->dma;
1522	int retcode = 0;
1523	const int zero = 0;
1524	unsigned long virtual;
1525	unsigned long address;
1526	struct drm_buf_map *request = data;
1527	int i;
1528
1529	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1530		return -EINVAL;
1531
1532	if (!dma)
1533		return -EINVAL;
1534
1535	spin_lock(&dev->count_lock);
1536	if (atomic_read(&dev->buf_alloc)) {
1537		spin_unlock(&dev->count_lock);
1538		return -EBUSY;
1539	}
1540	dev->buf_use++;		/* Can't allocate more after this call */
1541	spin_unlock(&dev->count_lock);
1542
1543	if (request->count >= dma->buf_count) {
1544		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1545		    || (drm_core_check_feature(dev, DRIVER_SG)
1546			&& (dma->flags & _DRM_DMA_USE_SG))
1547		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1548			&& (dma->flags & _DRM_DMA_USE_FB))) {
1549			struct drm_local_map *map = dev->agp_buffer_map;
1550			unsigned long token = dev->agp_buffer_token;
1551
1552			if (!map) {
1553				retcode = -EINVAL;
1554				goto done;
1555			}
1556			virtual = vm_mmap(file_priv->filp, 0, map->size,
1557					  PROT_READ | PROT_WRITE,
1558					  MAP_SHARED,
1559					  token);
1560		} else {
1561			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
1562					  PROT_READ | PROT_WRITE,
1563					  MAP_SHARED, 0);
1564		}
1565		if (virtual > -1024UL) {
1566			/* Real error */
1567			retcode = (signed long)virtual;
1568			goto done;
1569		}
1570		request->virtual = (void __user *)virtual;
1571
1572		for (i = 0; i < dma->buf_count; i++) {
1573			if (copy_to_user(&request->list[i].idx,
1574					 &dma->buflist[i]->idx,
1575					 sizeof(request->list[0].idx))) {
1576				retcode = -EFAULT;
1577				goto done;
1578			}
1579			if (copy_to_user(&request->list[i].total,
1580					 &dma->buflist[i]->total,
1581					 sizeof(request->list[0].total))) {
1582				retcode = -EFAULT;
1583				goto done;
1584			}
1585			if (copy_to_user(&request->list[i].used,
1586					 &zero, sizeof(zero))) {
1587				retcode = -EFAULT;
1588				goto done;
1589			}
1590			address = virtual + dma->buflist[i]->offset;	/* *** */
1591			if (copy_to_user(&request->list[i].address,
1592					 &address, sizeof(address))) {
1593				retcode = -EFAULT;
1594				goto done;
1595			}
1596		}
1597	}
1598      done:
1599	request->count = dma->buf_count;
1600	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1601
1602	return retcode;
1603}
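
/*
 * In practice clients drive this through libdrm rather than calling the
 * ioctl directly; a sketch (drmMapBufs() issues the ioctl and keeps the
 * per-buffer bookkeeping, drmUnmapBufs() undoes the mapping):
 *
 *	drmBufMapPtr bufs = drmMapBufs(fd);
 *
 *	(use bufs->list[i].address and bufs->list[i].total here)
 *
 *	drmUnmapBufs(bufs);
 */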
1604
1605/**
1606 * Compute size order.  Returns the exponent of the smallest power of two
1607 * which is greater than or equal to the given number.
1608 *
1609 * \param size size.
1610 * \return order.
1611 *
1612 * \todo Can be made faster.
1613 */
1614int drm_order(unsigned long size)
1615{
1616	int order;
1617	unsigned long tmp;
1618
1619	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1620
1621	if (size & (size - 1))
1622		++order;
1623
1624	return order;
1625}
1626EXPORT_SYMBOL(drm_order);
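
/*
 * A few sample values for reference; drm_order() is a ceiling log2, so it
 * could equally be built from the ilog2()/fls() helpers in <linux/log2.h>:
 *
 *	drm_order(1)    = 0
 *	drm_order(2)    = 1
 *	drm_order(3)    = 2
 *	drm_order(4096) = 12
 *	drm_order(4097) = 13
 */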