/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = NULL;

	if (size <= 2*PAGE_SIZE)
		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (mem->pages == NULL) {
		mem->pages = vmalloc(size);
	}
}
EXPORT_SYMBOL(agp_alloc_page_array);

void agp_free_page_array(struct agp_memory *mem)
{
	if (is_vmalloc_addr(mem->pages)) {
		vfree(mem->pages);
	} else {
		kfree(mem->pages);
	}
}
EXPORT_SYMBOL(agp_free_page_array);


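/*
 * Illustrative sketch, not part of the original file: sizing the page array.
 * With 4 KB pages, a 16 MB region needs 4096 struct page pointers (32 KB on
 * a 64-bit kernel), which is above the 2*PAGE_SIZE kmalloc threshold used
 * above, so the array would come from vmalloc; a 1 MB region needs only
 * 2 KB and stays with kmalloc.
 *
 *	size_t pages = (16 * 1024 * 1024) / PAGE_SIZE;
 *	struct agp_memory mem;
 *
 *	agp_alloc_page_array(pages * sizeof(struct page *), &mem);
 *	if (mem.pages == NULL)
 *		return -ENOMEM;
 *	...
 *	agp_free_page_array(&mem);
 */
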
static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
		return NULL;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 *	agp_free_memory - free memory associated with an agp_memory pointer.
 *
 *	@curr:		agp_memory pointer to be freed.
 *
 *	It is the only function that can be called when the backend is not owned
 *	by the caller.  (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {

			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))

/**
 *	agp_allocate_memory  -  allocate a group of pages of a certain type.
 *
 *	@bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 *	@page_count:	size_t argument of the number of pages
 *	@type:	u32 argument of the type of memory to be allocated.
 *
 *	Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 *	maps to physical ram.  Any other type is device dependent.
 *
 *	It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
					size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;
	int cur_memory;

	if (!bridge)
		return NULL;

	cur_memory = atomic_read(&bridge->current_memory_agp);
	if ((cur_memory + page_count > bridge->max_memory_agp) ||
	    (cur_memory + page_count < page_count))
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);


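/*
 * Illustrative sketch, not part of the original file: how a client such as
 * a DRM driver typically obtains AGP memory.  The bridge is acquired first
 * (agp_backend_acquire() lives in backend.c); "pdev" and the page count of
 * 64 are hypothetical example values.
 *
 *	struct agp_bridge_data *bridge = agp_backend_acquire(pdev);
 *	struct agp_memory *mem;
 *
 *	if (!bridge)
 *		return -ENODEV;
 *	mem = agp_allocate_memory(bridge, 64, AGP_NORMAL_MEMORY);
 *	if (!mem) {
 *		agp_backend_release(bridge);
 *		return -ENOMEM;
 *	}
 */
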
/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);


/**
 *	agp_copy_info  -  copy bridge state information
 *
 *	@bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 *	@info:		agp_kern_info pointer.  The caller should ensure that this pointer is valid.
 *
 *	This function copies information about the agp bridge device and the state of
 *	the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);

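/*
 * Illustrative sketch, not part of the original file: a caller querying the
 * backend before deciding how much memory to bind.
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) != 0)
 *		return -ENODEV;
 *	pr_info("AGP aperture: %lu MB\n", (unsigned long)info.aper_size);
 */
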
/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 *	agp_bind_memory  -  Bind an agp_memory structure into the GATT.
 *
 *	@curr:		agp_memory pointer
 *	@pg_start:	an offset into the graphics aperture translation table
 *
 *	It returns -EINVAL if the pointer == NULL.
 *	It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 *	agp_unbind_memory  -  Removes an agp_memory structure from the GATT
 *
 * @curr:	agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);


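/*
 * Illustrative sketch, not part of the original file: the usual lifecycle of
 * a piece of AGP memory.  pg_start is counted in aperture pages; the offset
 * of 16 used here is an arbitrary example value.
 *
 *	struct agp_memory *mem;
 *	int ret;
 *
 *	mem = agp_allocate_memory(bridge, 64, AGP_NORMAL_MEMORY);
 *	if (!mem)
 *		return -ENOMEM;
 *	ret = agp_bind_memory(mem, 16);
 *	if (ret) {
 *		agp_free_memory(mem);
 *		return ret;
 *	}
 *	... the 64 pages are now visible through the aperture ...
 *	agp_unbind_memory(mem);
 *	agp_free_memory(mem);
 */
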
/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *   The RATE field indicates the data transfer rates supported by this
	 *   device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
		case 0:
			printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
			*requested_mode |= AGPSTAT2_1X;
			break;
		case 1:
		case 2:
			break;
		case 3:
			*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
			break;
		case 4:
			break;
		case 5:
		case 6:
		case 7:
			*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
			break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}

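/*
 * Worked example (illustrative, not part of the original file) of the v2
 * negotiation above: the requested mode has the x1|x2 rate bits (0b011) set,
 * the bridge reports x1|x2|x4 (0b111) and the card reports x1|x2 (0b011).
 * The request is first normalised to a single rate (case 3 clears
 * AGPSTAT2_1X, leaving x2); AGPSTAT2_4X and AGPSTAT2_1X are then cleared
 * from bridge_agpstat because they are not requested, so the surviving bit
 * is AGPSTAT2_2X and the link is programmed for x2.
 */
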
/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
		 * have been passed a 3.0 mode, but with 2.x speed bits set.
		 * AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
						current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
					current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8*/
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If we didn't specify an AGP mode, we see if both
		 * the graphics card, and the bridge can do x8, and use if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have an AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
		 (vga_agpstat & AGPSTAT_FW) &&
		 (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);


void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
		    /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
		    bridge_agpstat &= ~(7<<10) ;
		    pci_read_config_dword(bridge->dev,
					bridge->capndx+AGPCTRL, &temp);
		    temp |= (1<<9);
		    pci_write_config_dword(bridge->dev,
					bridge->capndx+AGPCTRL, temp);

		    dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);


int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
				/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	if (set_memory_uc((unsigned long)table, 1 << page_order))
		printk(KERN_WARNING "Could not set GATT table memory to UC!");

	bridge->gatt_table = (void *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type, num_entries;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	num_entries = agp_num_entries();
	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the allocated
 * memory.  They also handle incrementing the current_memory_agp value, which is checked
 * against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page * page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);

struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page * page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */


/**
 * agp_enable  -  initialise the agp point-to-point connection.
 *
 * @bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode:	agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

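/*
 * Illustrative sketch, not part of the original file: callers usually feed a
 * mode value derived from agp_copy_info() (or handed down from userspace)
 * back into agp_enable(), which dispatches to the driver's agp_enable hook,
 * e.g. agp_generic_enable() above.
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) == 0)
 *		agp_enable(bridge, info.mode);
 */
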
/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10,0x000},
	{2048,  524288, 9, 0x800},
	{1024,  262144, 8, 0xc00},
	{ 512,  131072, 7, 0xe00},
	{ 256,   65536, 6, 0xf00},
	{ 128,   32768, 5, 0xf20},
	{  64,   16384, 4, 0xf30},
	{  32,    8192, 3, 0xf38},
	{  16,    4096, 2, 0xf3c},
	{   8,    2048, 1, 0xf3e},
	{   4,    1024, 0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
v4.6
   1/*
   2 * AGPGART driver.
   3 * Copyright (C) 2004 Silicon Graphics, Inc.
   4 * Copyright (C) 2002-2005 Dave Jones.
   5 * Copyright (C) 1999 Jeff Hartmann.
   6 * Copyright (C) 1999 Precision Insight, Inc.
   7 * Copyright (C) 1999 Xi Graphics, Inc.
   8 *
   9 * Permission is hereby granted, free of charge, to any person obtaining a
  10 * copy of this software and associated documentation files (the "Software"),
  11 * to deal in the Software without restriction, including without limitation
  12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13 * and/or sell copies of the Software, and to permit persons to whom the
  14 * Software is furnished to do so, subject to the following conditions:
  15 *
  16 * The above copyright notice and this permission notice shall be included
  17 * in all copies or substantial portions of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  22 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
  25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 * TODO:
  28 * - Allocate more than order 0 pages to avoid too much linear map splitting.
  29 */
  30#include <linux/module.h>
  31#include <linux/pci.h>
 
  32#include <linux/pagemap.h>
  33#include <linux/miscdevice.h>
  34#include <linux/pm.h>
  35#include <linux/agp_backend.h>
  36#include <linux/vmalloc.h>
  37#include <linux/dma-mapping.h>
  38#include <linux/mm.h>
  39#include <linux/sched.h>
  40#include <linux/slab.h>
  41#include <asm/io.h>
  42#include <asm/cacheflush.h>
  43#include <asm/pgtable.h>
  44#include "agp.h"
  45
  46__u32 *agp_gatt_table;
  47int agp_memory_reserved;
  48
  49/*
  50 * Needed by the Nforce GART driver for the time being. Would be
  51 * nice to do this some other way instead of needing this export.
  52 */
  53EXPORT_SYMBOL_GPL(agp_memory_reserved);
  54
  55/*
  56 * Generic routines for handling agp_memory structures -
  57 * They use the basic page allocation routines to do the brunt of the work.
  58 */
  59
  60void agp_free_key(int key)
  61{
  62	if (key < 0)
  63		return;
  64
  65	if (key < MAXKEY)
  66		clear_bit(key, agp_bridge->key_list);
  67}
  68EXPORT_SYMBOL(agp_free_key);
  69
  70
  71static int agp_get_key(void)
  72{
  73	int bit;
  74
  75	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
  76	if (bit < MAXKEY) {
  77		set_bit(bit, agp_bridge->key_list);
  78		return bit;
  79	}
  80	return -1;
  81}
  82
  83/*
  84 * Use kmalloc if possible for the page list. Otherwise fall back to
  85 * vmalloc. This speeds things up and also saves memory for small AGP
  86 * regions.
  87 */
  88
  89void agp_alloc_page_array(size_t size, struct agp_memory *mem)
  90{
  91	mem->pages = NULL;
  92
  93	if (size <= 2*PAGE_SIZE)
  94		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
  95	if (mem->pages == NULL) {
  96		mem->pages = vmalloc(size);
  97	}
  98}
  99EXPORT_SYMBOL(agp_alloc_page_array);
 100
 
 
 
 
 
 
 
 
 
 
 
 101static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
 102{
 103	struct agp_memory *new;
 104	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
 105
 106	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
 107		return NULL;
 108
 109	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
 110	if (new == NULL)
 111		return NULL;
 112
 113	new->key = agp_get_key();
 114
 115	if (new->key < 0) {
 116		kfree(new);
 117		return NULL;
 118	}
 119
 120	agp_alloc_page_array(alloc_size, new);
 121
 122	if (new->pages == NULL) {
 123		agp_free_key(new->key);
 124		kfree(new);
 125		return NULL;
 126	}
 127	new->num_scratch_pages = 0;
 128	return new;
 129}
 130
 131struct agp_memory *agp_create_memory(int scratch_pages)
 132{
 133	struct agp_memory *new;
 134
 135	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
 136	if (new == NULL)
 137		return NULL;
 138
 139	new->key = agp_get_key();
 140
 141	if (new->key < 0) {
 142		kfree(new);
 143		return NULL;
 144	}
 145
 146	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
 147
 148	if (new->pages == NULL) {
 149		agp_free_key(new->key);
 150		kfree(new);
 151		return NULL;
 152	}
 153	new->num_scratch_pages = scratch_pages;
 154	new->type = AGP_NORMAL_MEMORY;
 155	return new;
 156}
 157EXPORT_SYMBOL(agp_create_memory);
 158
 159/**
 160 *	agp_free_memory - free memory associated with an agp_memory pointer.
 161 *
 162 *	@curr:		agp_memory pointer to be freed.
 163 *
 164 *	It is the only function that can be called when the backend is not owned
 165 *	by the caller.  (So it can free memory on client death.)
 166 */
 167void agp_free_memory(struct agp_memory *curr)
 168{
 169	size_t i;
 170
 171	if (curr == NULL)
 172		return;
 173
 174	if (curr->is_bound)
 175		agp_unbind_memory(curr);
 176
 177	if (curr->type >= AGP_USER_TYPES) {
 178		agp_generic_free_by_type(curr);
 179		return;
 180	}
 181
 182	if (curr->type != 0) {
 183		curr->bridge->driver->free_by_type(curr);
 184		return;
 185	}
 186	if (curr->page_count != 0) {
 187		if (curr->bridge->driver->agp_destroy_pages) {
 188			curr->bridge->driver->agp_destroy_pages(curr);
 189		} else {
 190
 191			for (i = 0; i < curr->page_count; i++) {
 192				curr->bridge->driver->agp_destroy_page(
 193					curr->pages[i],
 194					AGP_PAGE_DESTROY_UNMAP);
 195			}
 196			for (i = 0; i < curr->page_count; i++) {
 197				curr->bridge->driver->agp_destroy_page(
 198					curr->pages[i],
 199					AGP_PAGE_DESTROY_FREE);
 200			}
 201		}
 202	}
 203	agp_free_key(curr->key);
 204	agp_free_page_array(curr);
 205	kfree(curr);
 206}
 207EXPORT_SYMBOL(agp_free_memory);
 208
 209#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
 210
 211/**
 212 *	agp_allocate_memory  -  allocate a group of pages of a certain type.
 213 *
 214 *	@page_count:	size_t argument of the number of pages
 215 *	@type:	u32 argument of the type of memory to be allocated.
 216 *
 217 *	Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 218 *	maps to physical ram.  Any other type is device dependent.
 219 *
 220 *	It returns NULL whenever memory is unavailable.
 221 */
 222struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 223					size_t page_count, u32 type)
 224{
 225	int scratch_pages;
 226	struct agp_memory *new;
 227	size_t i;
 228	int cur_memory;
 229
 230	if (!bridge)
 231		return NULL;
 232
 233	cur_memory = atomic_read(&bridge->current_memory_agp);
 234	if ((cur_memory + page_count > bridge->max_memory_agp) ||
 235	    (cur_memory + page_count < page_count))
 236		return NULL;
 237
 238	if (type >= AGP_USER_TYPES) {
 239		new = agp_generic_alloc_user(page_count, type);
 240		if (new)
 241			new->bridge = bridge;
 242		return new;
 243	}
 244
 245	if (type != 0) {
 246		new = bridge->driver->alloc_by_type(page_count, type);
 247		if (new)
 248			new->bridge = bridge;
 249		return new;
 250	}
 251
 252	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
 253
 254	new = agp_create_memory(scratch_pages);
 255
 256	if (new == NULL)
 257		return NULL;
 258
 259	if (bridge->driver->agp_alloc_pages) {
 260		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
 261			agp_free_memory(new);
 262			return NULL;
 263		}
 264		new->bridge = bridge;
 265		return new;
 266	}
 267
 268	for (i = 0; i < page_count; i++) {
 269		struct page *page = bridge->driver->agp_alloc_page(bridge);
 270
 271		if (page == NULL) {
 272			agp_free_memory(new);
 273			return NULL;
 274		}
 275		new->pages[i] = page;
 276		new->page_count++;
 277	}
 278	new->bridge = bridge;
 279
 280	return new;
 281}
 282EXPORT_SYMBOL(agp_allocate_memory);
 283
 284
 285/* End - Generic routines for handling agp_memory structures */
 286
 287
 288static int agp_return_size(void)
 289{
 290	int current_size;
 291	void *temp;
 292
 293	temp = agp_bridge->current_size;
 294
 295	switch (agp_bridge->driver->size_type) {
 296	case U8_APER_SIZE:
 297		current_size = A_SIZE_8(temp)->size;
 298		break;
 299	case U16_APER_SIZE:
 300		current_size = A_SIZE_16(temp)->size;
 301		break;
 302	case U32_APER_SIZE:
 303		current_size = A_SIZE_32(temp)->size;
 304		break;
 305	case LVL2_APER_SIZE:
 306		current_size = A_SIZE_LVL2(temp)->size;
 307		break;
 308	case FIXED_APER_SIZE:
 309		current_size = A_SIZE_FIX(temp)->size;
 310		break;
 311	default:
 312		current_size = 0;
 313		break;
 314	}
 315
 316	current_size -= (agp_memory_reserved / (1024*1024));
 317	if (current_size <0)
 318		current_size = 0;
 319	return current_size;
 320}
 321
 322
 323int agp_num_entries(void)
 324{
 325	int num_entries;
 326	void *temp;
 327
 328	temp = agp_bridge->current_size;
 329
 330	switch (agp_bridge->driver->size_type) {
 331	case U8_APER_SIZE:
 332		num_entries = A_SIZE_8(temp)->num_entries;
 333		break;
 334	case U16_APER_SIZE:
 335		num_entries = A_SIZE_16(temp)->num_entries;
 336		break;
 337	case U32_APER_SIZE:
 338		num_entries = A_SIZE_32(temp)->num_entries;
 339		break;
 340	case LVL2_APER_SIZE:
 341		num_entries = A_SIZE_LVL2(temp)->num_entries;
 342		break;
 343	case FIXED_APER_SIZE:
 344		num_entries = A_SIZE_FIX(temp)->num_entries;
 345		break;
 346	default:
 347		num_entries = 0;
 348		break;
 349	}
 350
 351	num_entries -= agp_memory_reserved>>PAGE_SHIFT;
 352	if (num_entries<0)
 353		num_entries = 0;
 354	return num_entries;
 355}
 356EXPORT_SYMBOL_GPL(agp_num_entries);
 357
 358
 359/**
 360 *	agp_copy_info  -  copy bridge state information
 361 *
 362 *	@info:		agp_kern_info pointer.  The caller should insure that this pointer is valid.
 363 *
 364 *	This function copies information about the agp bridge device and the state of
 365 *	the agp backend into an agp_kern_info pointer.
 366 */
 367int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
 368{
 369	memset(info, 0, sizeof(struct agp_kern_info));
 370	if (!bridge) {
 371		info->chipset = NOT_SUPPORTED;
 372		return -EIO;
 373	}
 374
 375	info->version.major = bridge->version->major;
 376	info->version.minor = bridge->version->minor;
 377	info->chipset = SUPPORTED;
 378	info->device = bridge->dev;
 379	if (bridge->mode & AGPSTAT_MODE_3_0)
 380		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
 381	else
 382		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
 383	info->aper_base = bridge->gart_bus_addr;
 384	info->aper_size = agp_return_size();
 385	info->max_memory = bridge->max_memory_agp;
 386	info->current_memory = atomic_read(&bridge->current_memory_agp);
 387	info->cant_use_aperture = bridge->driver->cant_use_aperture;
 388	info->vm_ops = bridge->vm_ops;
 389	info->page_mask = ~0UL;
 390	return 0;
 391}
 392EXPORT_SYMBOL(agp_copy_info);
 393
 394/* End - Routine to copy over information structure */
 395
 396/*
 397 * Routines for handling swapping of agp_memory into the GATT -
 398 * These routines take agp_memory and insert them into the GATT.
 399 * They call device specific routines to actually write to the GATT.
 400 */
 401
 402/**
 403 *	agp_bind_memory  -  Bind an agp_memory structure into the GATT.
 404 *
 405 *	@curr:		agp_memory pointer
 406 *	@pg_start:	an offset into the graphics aperture translation table
 407 *
 408 *	It returns -EINVAL if the pointer == NULL.
 409 *	It returns -EBUSY if the area of the table requested is already in use.
 410 */
 411int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 412{
 413	int ret_val;
 414
 415	if (curr == NULL)
 416		return -EINVAL;
 417
 418	if (curr->is_bound) {
 419		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
 420		return -EINVAL;
 421	}
 422	if (!curr->is_flushed) {
 423		curr->bridge->driver->cache_flush();
 424		curr->is_flushed = true;
 425	}
 426
 427	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 428
 429	if (ret_val != 0)
 430		return ret_val;
 431
 432	curr->is_bound = true;
 433	curr->pg_start = pg_start;
 434	spin_lock(&agp_bridge->mapped_lock);
 435	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
 436	spin_unlock(&agp_bridge->mapped_lock);
 437
 438	return 0;
 439}
 440EXPORT_SYMBOL(agp_bind_memory);
 441
 442
 443/**
 444 *	agp_unbind_memory  -  Removes an agp_memory structure from the GATT
 445 *
 446 * @curr:	agp_memory pointer to be removed from the GATT.
 447 *
 448 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 449 * the graphics aperture translation table or if the agp_memory pointer == NULL
 450 */
 451int agp_unbind_memory(struct agp_memory *curr)
 452{
 453	int ret_val;
 454
 455	if (curr == NULL)
 456		return -EINVAL;
 457
 458	if (!curr->is_bound) {
 459		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
 460		return -EINVAL;
 461	}
 462
 463	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
 464
 465	if (ret_val != 0)
 466		return ret_val;
 467
 468	curr->is_bound = false;
 469	curr->pg_start = 0;
 470	spin_lock(&curr->bridge->mapped_lock);
 471	list_del(&curr->mapped_list);
 472	spin_unlock(&curr->bridge->mapped_lock);
 473	return 0;
 474}
 475EXPORT_SYMBOL(agp_unbind_memory);
 476
 477
 478/* End - Routines for handling swapping of agp_memory into the GATT */
 479
 480
 481/* Generic Agp routines - Start */
 482static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
 483{
 484	u32 tmp;
 485
 486	if (*requested_mode & AGP2_RESERVED_MASK) {
 487		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
 488			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
 489		*requested_mode &= ~AGP2_RESERVED_MASK;
 490	}
 491
 492	/*
 493	 * Some dumb bridges are programmed to disobey the AGP2 spec.
  494	 * This is likely BIOS misprogramming rather than a power-on default, or
  495	 * it would be a lot more common.
 496	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
 497	 * AGPv2 spec 6.1.9 states:
 498	 *   The RATE field indicates the data transfer rates supported by this
 499	 *   device. A.G.P. devices must report all that apply.
 500	 * Fix them up as best we can.
 501	 */
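	/*
	 * For reference: in the AGP 2.0 encoding the low three status bits
	 * are 1X (bit 0), 2X (bit 1) and 4X (bit 2), so a conforming 4X
	 * bridge reports 0x7; the broken ones handled below report only
	 * 0x4 or 0x2.
	 */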
 502	switch (*bridge_agpstat & 7) {
 503	case 4:
 504		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
 505		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
 506			"Fixing up support for x2 & x1\n");
 507		break;
 508	case 2:
 509		*bridge_agpstat |= AGPSTAT2_1X;
 510		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
 511			"Fixing up support for x1\n");
 512		break;
 513	default:
 514		break;
 515	}
 516
 517	/* Check the speed bits make sense. Only one should be set. */
 518	tmp = *requested_mode & 7;
 519	switch (tmp) {
 520		case 0:
 521			printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
 522			*requested_mode |= AGPSTAT2_1X;
 523			break;
 524		case 1:
 525		case 2:
 526			break;
 527		case 3:
 528			*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
 529			break;
 530		case 4:
 531			break;
 532		case 5:
 533		case 6:
 534		case 7:
 535			*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
 536			break;
 537	}
 538
 539	/* disable SBA if it's not supported */
 540	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
 541		*bridge_agpstat &= ~AGPSTAT_SBA;
 542
 543	/* Set rate */
 544	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
 545		*bridge_agpstat &= ~AGPSTAT2_4X;
 546
 547	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
 548		*bridge_agpstat &= ~AGPSTAT2_2X;
 549
 550	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
 551		*bridge_agpstat &= ~AGPSTAT2_1X;
 552
 553	/* Now we know what mode it should be, clear out the unwanted bits. */
 554	if (*bridge_agpstat & AGPSTAT2_4X)
 555		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */
 556
 557	if (*bridge_agpstat & AGPSTAT2_2X)
 558		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */
 559
 560	if (*bridge_agpstat & AGPSTAT2_1X)
 561		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */
 562
 563	/* Apply any errata. */
 564	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
 565		*bridge_agpstat &= ~AGPSTAT_FW;
 566
 567	if (agp_bridge->flags & AGP_ERRATA_SBA)
 568		*bridge_agpstat &= ~AGPSTAT_SBA;
 569
 570	if (agp_bridge->flags & AGP_ERRATA_1X) {
 571		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
 572		*bridge_agpstat |= AGPSTAT2_1X;
 573	}
 574
 575	/* If we've dropped down to 1X, disable fast writes. */
 576	if (*bridge_agpstat & AGPSTAT2_1X)
 577		*bridge_agpstat &= ~AGPSTAT_FW;
 578}
 579
 580/*
 581 * requested_mode = Mode requested by (typically) X.
  582 * bridge_agpstat = PCI_AGP_STATUS from the AGP bridge.
  583 * vga_agpstat = PCI_AGP_STATUS from the graphics card.
 584 */
 585static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
 586{
  587	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
 588	u32 tmp;
 589
 590	if (*requested_mode & AGP3_RESERVED_MASK) {
 591		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
 592			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
 593		*requested_mode &= ~AGP3_RESERVED_MASK;
 594	}
 595
 596	/* Check the speed bits make sense. */
 597	tmp = *requested_mode & 7;
 598	if (tmp == 0) {
 599		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
 600		*requested_mode |= AGPSTAT3_4X;
 601	}
 602	if (tmp >= 3) {
 603		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
 604		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
 605	}
 606
 607	/* ARQSZ - Set the value to the maximum one.
 608	 * Don't allow the mode register to override values. */
 609	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
 610		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
 611
 612	/* Calibration cycle.
 613	 * Don't allow the mode register to override values. */
 614	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
 615		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
 616
 617	/* SBA *must* be supported for AGP v3 */
 618	*bridge_agpstat |= AGPSTAT_SBA;
 619
 620	/*
 621	 * Set speed.
 622	 * Check for invalid speeds. This can happen when applications
 623	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
 624	 */
 625	if (*requested_mode & AGPSTAT_MODE_3_0) {
 626		/*
  627		 * The caller hasn't a clue what it is doing. The bridge is in 3.0
  628		 * mode and we have been passed a 3.0 mode, but with 2.x speed bits
  629		 * set. Convert AGP2.x 4x -> AGP3.0 4x.
 630		 */
 631		if (*requested_mode & AGPSTAT2_4X) {
 632			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
 633						current->comm, *requested_mode);
 634			*requested_mode &= ~AGPSTAT2_4X;
 635			*requested_mode |= AGPSTAT3_4X;
 636		}
 637	} else {
 638		/*
 639		 * The caller doesn't know what they are doing. We are in 3.0 mode,
 640		 * but have been passed an AGP 2.x mode.
 641		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
 642		 */
 643		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
 644					current->comm, *requested_mode);
 645		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
 646		*requested_mode |= AGPSTAT3_4X;
 647	}
 648
 649	if (*requested_mode & AGPSTAT3_8X) {
 650		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
 651			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 652			*bridge_agpstat |= AGPSTAT3_4X;
 653			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
 654			return;
 655		}
 656		if (!(*vga_agpstat & AGPSTAT3_8X)) {
 657			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 658			*bridge_agpstat |= AGPSTAT3_4X;
 659			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
 660			return;
 661		}
  662		/* All set, bridge & device can do AGP x8 */
 663		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 664		goto done;
 665
 666	} else if (*requested_mode & AGPSTAT3_4X) {
 667		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 668		*bridge_agpstat |= AGPSTAT3_4X;
 669		goto done;
 670
 671	} else {
 672
 673		/*
  674		 * If we didn't specify an AGP mode, we check whether both
  675		 * the graphics card and the bridge can do x8, and use it if so.
  676		 * If not, we fall back to x4 mode.
 677		 */
 678		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
 679			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
 680				"supported by bridge & card (x8).\n");
 681			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 682			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 683		} else {
 684			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
 685			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
 686				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
 687					*bridge_agpstat, origbridge);
 688				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 689				*bridge_agpstat |= AGPSTAT3_4X;
 690			}
 691			if (!(*vga_agpstat & AGPSTAT3_8X)) {
 692				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
 693					*vga_agpstat, origvga);
 694				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 695				*vga_agpstat |= AGPSTAT3_4X;
 696			}
 697		}
 698	}
 699
 700done:
 701	/* Apply any errata. */
 702	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
 703		*bridge_agpstat &= ~AGPSTAT_FW;
 704
 705	if (agp_bridge->flags & AGP_ERRATA_SBA)
 706		*bridge_agpstat &= ~AGPSTAT_SBA;
 707
 708	if (agp_bridge->flags & AGP_ERRATA_1X) {
 709		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
 710		*bridge_agpstat |= AGPSTAT2_1X;
 711	}
 712}
 713
 714
 715/**
  716 * agp_collect_device_status - determine correct agp_cmd from various agp_stat values
  717 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
  718 * @requested_mode: requested agp_stat from userspace (typically from X)
 719 * @bridge_agpstat: current agp_stat from AGP bridge.
 720 *
 721 * This function will hunt for an AGP graphics card, and try to match
 722 * the requested mode to the capabilities of both the bridge and the card.
 723 */
 724u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
 725{
 726	struct pci_dev *device = NULL;
 727	u32 vga_agpstat;
 728	u8 cap_ptr;
 729
 730	for (;;) {
 731		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
 732		if (!device) {
 733			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
 734			return 0;
 735		}
 736		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
 737		if (cap_ptr)
 738			break;
 739	}
 740
 741	/*
  742	 * Ok, here we have an AGP device. Disable impossible
  743	 * settings, and adjust the request queue depth to the minimum.
 744	 */
 745	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
 746
 747	/* adjust RQ depth */
 748	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
 749	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
 750		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
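	/*
	 * The RQ field is the maximum number of AGP command requests the
	 * device can have outstanding; taking the smallest of the requested,
	 * bridge and card values keeps everyone within their advertised limit.
	 */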
 751
 752	/* disable FW if it's not supported */
 753	if (!((bridge_agpstat & AGPSTAT_FW) &&
 754		 (vga_agpstat & AGPSTAT_FW) &&
 755		 (requested_mode & AGPSTAT_FW)))
 756		bridge_agpstat &= ~AGPSTAT_FW;
 757
 758	/* Check to see if we are operating in 3.0 mode */
 759	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
 760		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
 761	else
 762		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
 763
 764	pci_dev_put(device);
 765	return bridge_agpstat;
 766}
 767EXPORT_SYMBOL(agp_collect_device_status);
 768
 769
 770void agp_device_command(u32 bridge_agpstat, bool agp_v3)
 771{
 772	struct pci_dev *device = NULL;
 773	int mode;
 774
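	/*
	 * The low three bits of the command select the transfer rate.  For
	 * AGP 2.x they are the rate directly (1x/2x/4x); in AGP 3.0 the same
	 * field encodes multiples of 4x (1 -> 4x, 2 -> 8x), hence the scaling
	 * below, which is only used for the log message.
	 */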
 775	mode = bridge_agpstat & 0x7;
 776	if (agp_v3)
 777		mode *= 4;
 778
 779	for_each_pci_dev(device) {
 780		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
 781		if (!agp)
 782			continue;
 783
 784		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
 785			 agp_v3 ? 3 : 2, mode);
 786		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
 787	}
 788}
 789EXPORT_SYMBOL(agp_device_command);
 790
 791
 792void get_agp_version(struct agp_bridge_data *bridge)
 793{
 794	u32 ncapid;
 795
 796	/* Exit early if already set by errata workarounds. */
 797	if (bridge->major_version != 0)
 798		return;
 799
 800	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
 801	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
 802	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
 803}
 804EXPORT_SYMBOL(get_agp_version);
 805
 806
 807void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 808{
 809	u32 bridge_agpstat, temp;
 810
 811	get_agp_version(agp_bridge);
 812
 813	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
 814		 agp_bridge->major_version, agp_bridge->minor_version);
 815
 816	pci_read_config_dword(agp_bridge->dev,
 817		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
 818
 819	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
 820	if (bridge_agpstat == 0)
 821		/* Something bad happened. FIXME: Return error code? */
 822		return;
 823
 824	bridge_agpstat |= AGPSTAT_AGP_ENABLE;
 825
 826	/* Do AGP version specific frobbing. */
 827	if (bridge->major_version >= 3) {
 828		if (bridge->mode & AGPSTAT_MODE_3_0) {
 829			/* If we have 3.5, we can do the isoch stuff. */
 830			if (bridge->minor_version >= 5)
 831				agp_3_5_enable(bridge);
 832			agp_device_command(bridge_agpstat, true);
 833			return;
 834		} else {
  835		    /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
  836		    bridge_agpstat &= ~(7<<10);
 837		    pci_read_config_dword(bridge->dev,
 838					bridge->capndx+AGPCTRL, &temp);
 839		    temp |= (1<<9);
 840		    pci_write_config_dword(bridge->dev,
 841					bridge->capndx+AGPCTRL, temp);
 842
 843		    dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
 844		}
 845	}
 846
 847	/* AGP v<3 */
 848	agp_device_command(bridge_agpstat, false);
 849}
 850EXPORT_SYMBOL(agp_generic_enable);
 851
 852
 853int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 854{
 855	char *table;
 856	char *table_end;
 857	int size;
 858	int page_order;
 859	int num_entries;
 860	int i;
 861	void *temp;
 862	struct page *page;
 863
  864	/* The generic routines can't handle two-level GATTs */
 865	if (bridge->driver->size_type == LVL2_APER_SIZE)
 866		return -EINVAL;
 867
 868	table = NULL;
 869	i = bridge->aperture_size_idx;
 870	temp = bridge->current_size;
 871	size = page_order = num_entries = 0;
 872
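	/*
	 * For the variable aperture-size types the GATT allocation below may
	 * fail for the larger settings; the loop then advances
	 * aperture_size_idx to the next (typically smaller) entry in the
	 * driver's aperture_sizes table and retries until an allocation
	 * succeeds or the sizes are exhausted.
	 */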
 873	if (bridge->driver->size_type != FIXED_APER_SIZE) {
 874		do {
 875			switch (bridge->driver->size_type) {
 876			case U8_APER_SIZE:
 877				size = A_SIZE_8(temp)->size;
 878				page_order =
 879				    A_SIZE_8(temp)->page_order;
 880				num_entries =
 881				    A_SIZE_8(temp)->num_entries;
 882				break;
 883			case U16_APER_SIZE:
 884				size = A_SIZE_16(temp)->size;
 885				page_order = A_SIZE_16(temp)->page_order;
 886				num_entries = A_SIZE_16(temp)->num_entries;
 887				break;
 888			case U32_APER_SIZE:
 889				size = A_SIZE_32(temp)->size;
 890				page_order = A_SIZE_32(temp)->page_order;
 891				num_entries = A_SIZE_32(temp)->num_entries;
 892				break;
 893				/* This case will never really happen. */
 894			case FIXED_APER_SIZE:
 895			case LVL2_APER_SIZE:
 896			default:
 897				size = page_order = num_entries = 0;
 898				break;
 899			}
 900
 901			table = alloc_gatt_pages(page_order);
 902
 903			if (table == NULL) {
 904				i++;
 905				switch (bridge->driver->size_type) {
 906				case U8_APER_SIZE:
 907					bridge->current_size = A_IDX8(bridge);
 908					break;
 909				case U16_APER_SIZE:
 910					bridge->current_size = A_IDX16(bridge);
 911					break;
 912				case U32_APER_SIZE:
 913					bridge->current_size = A_IDX32(bridge);
 914					break;
 915				/* These cases will never really happen. */
 916				case FIXED_APER_SIZE:
 917				case LVL2_APER_SIZE:
 918				default:
 919					break;
 920				}
 921				temp = bridge->current_size;
 922			} else {
 923				bridge->aperture_size_idx = i;
 924			}
 925		} while (!table && (i < bridge->driver->num_aperture_sizes));
 926	} else {
 927		size = ((struct aper_size_info_fixed *) temp)->size;
 928		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
 929		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
 930		table = alloc_gatt_pages(page_order);
 931	}
 932
 933	if (table == NULL)
 934		return -ENOMEM;
 935
 936	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
 937
 938	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
 939		SetPageReserved(page);
 940
 941	bridge->gatt_table_real = (u32 *) table;
 942	agp_gatt_table = (void *)table;
 943
 944	bridge->driver->cache_flush();
 945#ifdef CONFIG_X86
 946	if (set_memory_uc((unsigned long)table, 1 << page_order))
 947		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
 948
 949	bridge->gatt_table = (u32 __iomem *)table;
 950#else
 951	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
 952					(PAGE_SIZE * (1 << page_order)));
 953	bridge->driver->cache_flush();
 954#endif
 955
 956	if (bridge->gatt_table == NULL) {
 957		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
 958			ClearPageReserved(page);
 959
 960		free_gatt_pages(table, page_order);
 961
 962		return -ENOMEM;
 963	}
 964	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
 965
 966	/* AK: bogus, should encode addresses > 4GB */
 967	for (i = 0; i < num_entries; i++) {
 968		writel(bridge->scratch_page, bridge->gatt_table+i);
 969		readl(bridge->gatt_table+i);	/* PCI Posting. */
 970	}
 971
 972	return 0;
 973}
 974EXPORT_SYMBOL(agp_generic_create_gatt_table);
 975
 976int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
 977{
 978	int page_order;
 979	char *table, *table_end;
 980	void *temp;
 981	struct page *page;
 982
 983	temp = bridge->current_size;
 984
 985	switch (bridge->driver->size_type) {
 986	case U8_APER_SIZE:
 987		page_order = A_SIZE_8(temp)->page_order;
 988		break;
 989	case U16_APER_SIZE:
 990		page_order = A_SIZE_16(temp)->page_order;
 991		break;
 992	case U32_APER_SIZE:
 993		page_order = A_SIZE_32(temp)->page_order;
 994		break;
 995	case FIXED_APER_SIZE:
 996		page_order = A_SIZE_FIX(temp)->page_order;
 997		break;
 998	case LVL2_APER_SIZE:
  999		/* The generic routines can't deal with two-level GATTs */
 1000		return -EINVAL;
 1001	default:
1002		page_order = 0;
1003		break;
1004	}
1005
1006	/* Do not worry about freeing memory, because if this is
1007	 * called, then all agp memory is deallocated and removed
1008	 * from the table. */
1009
1010#ifdef CONFIG_X86
1011	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
1012#else
1013	iounmap(bridge->gatt_table);
1014#endif
1015	table = (char *) bridge->gatt_table_real;
1016	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
1017
1018	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
1019		ClearPageReserved(page);
1020
1021	free_gatt_pages(bridge->gatt_table_real, page_order);
1022
1023	agp_gatt_table = NULL;
1024	bridge->gatt_table = NULL;
1025	bridge->gatt_table_real = NULL;
1026	bridge->gatt_bus_addr = 0;
1027
1028	return 0;
1029}
1030EXPORT_SYMBOL(agp_generic_free_gatt_table);
1031
1032
1033int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
1034{
1035	int num_entries;
1036	size_t i;
1037	off_t j;
1038	void *temp;
1039	struct agp_bridge_data *bridge;
1040	int mask_type;
1041
1042	bridge = mem->bridge;
1043	if (!bridge)
1044		return -EINVAL;
1045
1046	if (mem->page_count == 0)
1047		return 0;
1048
1049	temp = bridge->current_size;
1050
1051	switch (bridge->driver->size_type) {
1052	case U8_APER_SIZE:
1053		num_entries = A_SIZE_8(temp)->num_entries;
1054		break;
1055	case U16_APER_SIZE:
1056		num_entries = A_SIZE_16(temp)->num_entries;
1057		break;
1058	case U32_APER_SIZE:
1059		num_entries = A_SIZE_32(temp)->num_entries;
1060		break;
1061	case FIXED_APER_SIZE:
1062		num_entries = A_SIZE_FIX(temp)->num_entries;
1063		break;
1064	case LVL2_APER_SIZE:
 1065		/* The generic routines can't deal with two-level GATTs */
 1066		return -EINVAL;
 1067	default:
1068		num_entries = 0;
1069		break;
1070	}
1071
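	/*
	 * agp_memory_reserved is a byte count set aside by the chipset driver
	 * (currently only the nForce GART uses it); the corresponding GATT
	 * entries are excluded from generic insertions.
	 */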
1072	num_entries -= agp_memory_reserved/PAGE_SIZE;
1073	if (num_entries < 0) num_entries = 0;
1074
1075	if (type != mem->type)
1076		return -EINVAL;
1077
1078	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
1079	if (mask_type != 0) {
1080		/* The generic routines know nothing of memory types */
1081		return -EINVAL;
1082	}
1083
1084	if (((pg_start + mem->page_count) > num_entries) ||
1085	    ((pg_start + mem->page_count) < pg_start))
1086		return -EINVAL;
1087
1088	j = pg_start;
1089
1090	while (j < (pg_start + mem->page_count)) {
1091		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
1092			return -EBUSY;
1093		j++;
1094	}
1095
1096	if (!mem->is_flushed) {
1097		bridge->driver->cache_flush();
1098		mem->is_flushed = true;
1099	}
1100
1101	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1102		writel(bridge->driver->mask_memory(bridge,
1103						   page_to_phys(mem->pages[i]),
1104						   mask_type),
1105		       bridge->gatt_table+j);
1106	}
1107	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
1108
1109	bridge->driver->tlb_flush(mem);
1110	return 0;
1111}
1112EXPORT_SYMBOL(agp_generic_insert_memory);
1113
1114
1115int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1116{
1117	size_t i;
1118	struct agp_bridge_data *bridge;
1119	int mask_type, num_entries;
1120
1121	bridge = mem->bridge;
1122	if (!bridge)
1123		return -EINVAL;
1124
1125	if (mem->page_count == 0)
1126		return 0;
1127
1128	if (type != mem->type)
1129		return -EINVAL;
1130
1131	num_entries = agp_num_entries();
1132	if (((pg_start + mem->page_count) > num_entries) ||
1133	    ((pg_start + mem->page_count) < pg_start))
1134		return -EINVAL;
1135
1136	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
1137	if (mask_type != 0) {
1138		/* The generic routines know nothing of memory types */
1139		return -EINVAL;
1140	}
1141
1142	/* AK: bogus, should encode addresses > 4GB */
1143	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1144		writel(bridge->scratch_page, bridge->gatt_table+i);
1145	}
1146	readl(bridge->gatt_table+i-1);	/* PCI Posting. */
1147
1148	bridge->driver->tlb_flush(mem);
1149	return 0;
1150}
1151EXPORT_SYMBOL(agp_generic_remove_memory);
1152
1153struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
1154{
1155	return NULL;
1156}
1157EXPORT_SYMBOL(agp_generic_alloc_by_type);
1158
1159void agp_generic_free_by_type(struct agp_memory *curr)
1160{
1161	agp_free_page_array(curr);
1162	agp_free_key(curr->key);
1163	kfree(curr);
1164}
1165EXPORT_SYMBOL(agp_generic_free_by_type);
1166
1167struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
1168{
1169	struct agp_memory *new;
1170	int i;
1171	int pages;
1172
1173	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
1174	new = agp_create_user_memory(page_count);
1175	if (new == NULL)
1176		return NULL;
1177
1178	for (i = 0; i < page_count; i++)
1179		new->pages[i] = NULL;
1180	new->page_count = 0;
1181	new->type = type;
1182	new->num_scratch_pages = pages;
1183
1184	return new;
1185}
1186EXPORT_SYMBOL(agp_generic_alloc_user);
1187
1188/*
1189 * Basic Page Allocation Routines -
1190 * These routines handle page allocation and by default they reserve the allocated
 1191 * memory.  They also handle incrementing the current_memory_agp value, which is checked
1192 * against a maximum value.
1193 */
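/*
 * (These are the default agp_alloc_page/agp_alloc_pages and
 * agp_destroy_page/agp_destroy_pages hooks that most chipset drivers wire
 * into their struct agp_bridge_driver.)
 */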
1194
1195int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
1196{
1197	struct page * page;
1198	int i, ret = -ENOMEM;
1199
1200	for (i = 0; i < num_pages; i++) {
1201		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
1202		/* agp_free_memory() needs gart address */
1203		if (page == NULL)
1204			goto out;
1205
1206#ifndef CONFIG_X86
1207		map_page_into_agp(page);
1208#endif
1209		get_page(page);
1210		atomic_inc(&agp_bridge->current_memory_agp);
1211
1212		mem->pages[i] = page;
1213		mem->page_count++;
1214	}
1215
1216#ifdef CONFIG_X86
1217	set_pages_array_uc(mem->pages, num_pages);
1218#endif
1219	ret = 0;
1220out:
1221	return ret;
1222}
1223EXPORT_SYMBOL(agp_generic_alloc_pages);
1224
1225struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
1226{
1227	struct page * page;
1228
1229	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
1230	if (page == NULL)
1231		return NULL;
1232
1233	map_page_into_agp(page);
1234
1235	get_page(page);
1236	atomic_inc(&agp_bridge->current_memory_agp);
1237	return page;
1238}
1239EXPORT_SYMBOL(agp_generic_alloc_page);
1240
1241void agp_generic_destroy_pages(struct agp_memory *mem)
1242{
1243	int i;
1244	struct page *page;
1245
1246	if (!mem)
1247		return;
1248
1249#ifdef CONFIG_X86
1250	set_pages_array_wb(mem->pages, mem->page_count);
1251#endif
1252
1253	for (i = 0; i < mem->page_count; i++) {
1254		page = mem->pages[i];
1255
1256#ifndef CONFIG_X86
1257		unmap_page_from_agp(page);
1258#endif
1259		put_page(page);
1260		__free_page(page);
1261		atomic_dec(&agp_bridge->current_memory_agp);
1262		mem->pages[i] = NULL;
1263	}
1264}
1265EXPORT_SYMBOL(agp_generic_destroy_pages);
1266
1267void agp_generic_destroy_page(struct page *page, int flags)
1268{
1269	if (page == NULL)
1270		return;
1271
1272	if (flags & AGP_PAGE_DESTROY_UNMAP)
1273		unmap_page_from_agp(page);
1274
1275	if (flags & AGP_PAGE_DESTROY_FREE) {
1276		put_page(page);
1277		__free_page(page);
1278		atomic_dec(&agp_bridge->current_memory_agp);
1279	}
1280}
1281EXPORT_SYMBOL(agp_generic_destroy_page);
1282
1283/* End Basic Page Allocation Routines */
1284
1285
1286/**
1287 * agp_enable  -  initialise the agp point-to-point connection.
 1288 *
      * @bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 1289 * @mode:	agp mode register value to configure with.
1290 */
1291void agp_enable(struct agp_bridge_data *bridge, u32 mode)
1292{
1293	if (!bridge)
1294		return;
1295	bridge->driver->agp_enable(bridge, mode);
1296}
1297EXPORT_SYMBOL(agp_enable);
1298
1299/* When we remove the global variable agp_bridge from all drivers
1300 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
1301 */
1302
1303struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
1304{
1305	if (list_empty(&agp_bridges))
1306		return NULL;
1307
1308	return agp_bridge;
1309}
1310
1311static void ipi_handler(void *null)
1312{
1313	flush_agp_cache();
1314}
1315
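/*
 * Flush CPU caches on every processor (remote CPUs via IPI) so that no
 * core still holds stale cache lines for pages that are about to be
 * mapped uncached through the GART.
 */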
1316void global_cache_flush(void)
1317{
1318	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
1319		panic(PFX "timed out waiting for the other CPUs!\n");
1320}
1321EXPORT_SYMBOL(global_cache_flush);
1322
1323unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
1324				      dma_addr_t addr, int type)
1325{
1326	/* memory type is ignored in the generic routine */
1327	if (bridge->driver->masks)
1328		return addr | bridge->driver->masks[0].mask;
1329	else
1330		return addr;
1331}
1332EXPORT_SYMBOL(agp_generic_mask_memory);
1333
1334int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
1335				  int type)
1336{
1337	if (type >= AGP_USER_TYPES)
1338		return 0;
1339	return type;
1340}
1341EXPORT_SYMBOL(agp_generic_type_to_mask_type);
1342
1343/*
1344 * These functions are implemented according to the AGPv3 spec,
1345 * which covers implementation details that had previously been
1346 * left open.
1347 */
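/*
 * (Chipset drivers for plain AGPv3 bridges typically point their
 * agp_bridge_driver hooks straight at these helpers and use
 * agp3_generic_sizes below as their aperture-size table.)
 */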
1348
1349int agp3_generic_fetch_size(void)
1350{
1351	u16 temp_size;
1352	int i;
1353	struct aper_size_info_16 *values;
1354
1355	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
1356	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
1357
1358	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
1359		if (temp_size == values[i].size_value) {
1360			agp_bridge->previous_size =
1361				agp_bridge->current_size = (void *) (values + i);
1362
1363			agp_bridge->aperture_size_idx = i;
1364			return values[i].size;
1365		}
1366	}
1367	return 0;
1368}
1369EXPORT_SYMBOL(agp3_generic_fetch_size);
1370
1371void agp3_generic_tlbflush(struct agp_memory *mem)
1372{
1373	u32 ctrl;
1374	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1375	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
1376	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
1377}
1378EXPORT_SYMBOL(agp3_generic_tlbflush);
1379
1380int agp3_generic_configure(void)
1381{
1382	u32 temp;
1383	struct aper_size_info_16 *current_size;
1384
1385	current_size = A_SIZE_16(agp_bridge->current_size);
1386
1387	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
1388						    AGP_APERTURE_BAR);
1389
1390	/* set aperture size */
1391	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
1392	/* set gart pointer */
1393	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
1394	/* enable aperture and GTLB */
1395	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
1396	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
1397	return 0;
1398}
1399EXPORT_SYMBOL(agp3_generic_configure);
1400
1401void agp3_generic_cleanup(void)
1402{
1403	u32 ctrl;
1404	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1405	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
1406}
1407EXPORT_SYMBOL(agp3_generic_cleanup);
1408
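/*
 * Each entry below is { aperture size in MB, number of GATT entries,
 * GATT page order, value programmed into the AGPAPSIZE register }, as
 * laid out by struct aper_size_info_16.
 */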
1409const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
1410{
1411	{4096, 1048576, 10,0x000},
1412	{2048,  524288, 9, 0x800},
1413	{1024,  262144, 8, 0xc00},
1414	{ 512,  131072, 7, 0xe00},
1415	{ 256,   65536, 6, 0xf00},
1416	{ 128,   32768, 5, 0xf20},
1417	{  64,   16384, 4, 0xf30},
1418	{  32,    8192, 3, 0xf38},
1419	{  16,    4096, 2, 0xf3c},
1420	{   8,    2048, 1, 0xf3e},
1421	{   4,    1024, 0, 0xf3f}
1422};
1423EXPORT_SYMBOL(agp3_generic_sizes);
1424