   1/*
   2 * AGPGART driver.
   3 * Copyright (C) 2004 Silicon Graphics, Inc.
   4 * Copyright (C) 2002-2005 Dave Jones.
   5 * Copyright (C) 1999 Jeff Hartmann.
   6 * Copyright (C) 1999 Precision Insight, Inc.
   7 * Copyright (C) 1999 Xi Graphics, Inc.
   8 *
   9 * Permission is hereby granted, free of charge, to any person obtaining a
  10 * copy of this software and associated documentation files (the "Software"),
  11 * to deal in the Software without restriction, including without limitation
  12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13 * and/or sell copies of the Software, and to permit persons to whom the
  14 * Software is furnished to do so, subject to the following conditions:
  15 *
  16 * The above copyright notice and this permission notice shall be included
  17 * in all copies or substantial portions of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  22 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
  25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 * TODO:
  28 * - Allocate more than order 0 pages to avoid too much linear map splitting.
  29 */
  30#include <linux/module.h>
  31#include <linux/pci.h>
  32#include <linux/init.h>
  33#include <linux/pagemap.h>
  34#include <linux/miscdevice.h>
  35#include <linux/pm.h>
  36#include <linux/agp_backend.h>
  37#include <linux/vmalloc.h>
  38#include <linux/dma-mapping.h>
  39#include <linux/mm.h>
  40#include <linux/sched.h>
  41#include <linux/slab.h>
  42#include <asm/io.h>
  43#include <asm/cacheflush.h>
  44#include <asm/pgtable.h>
  45#include "agp.h"
  46
  47__u32 *agp_gatt_table;
  48int agp_memory_reserved;
  49
  50/*
  51 * Needed by the Nforce GART driver for the time being. Would be
  52 * nice to do this some other way instead of needing this export.
  53 */
  54EXPORT_SYMBOL_GPL(agp_memory_reserved);
  55
  56/*
  57 * Generic routines for handling agp_memory structures -
  58 * They use the basic page allocation routines to do the brunt of the work.
  59 */
  60
  61void agp_free_key(int key)
  62{
  63	if (key < 0)
  64		return;
  65
  66	if (key < MAXKEY)
  67		clear_bit(key, agp_bridge->key_list);
  68}
  69EXPORT_SYMBOL(agp_free_key);
  70
  71
  72static int agp_get_key(void)
  73{
  74	int bit;
  75
  76	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
  77	if (bit < MAXKEY) {
  78		set_bit(bit, agp_bridge->key_list);
  79		return bit;
  80	}
  81	return -1;
  82}
  83
  84/*
  85 * Use kmalloc if possible for the page list. Otherwise fall back to
  86 * vmalloc. This speeds things up and also saves memory for small AGP
  87 * regions.
  88 */
  89
  90void agp_alloc_page_array(size_t size, struct agp_memory *mem)
  91{
  92	mem->pages = NULL;
  93
  94	if (size <= 2*PAGE_SIZE)
  95		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
  96	if (mem->pages == NULL) {
  97		mem->pages = vmalloc(size);
  98	}
  99}
 100EXPORT_SYMBOL(agp_alloc_page_array);
 101
 102void agp_free_page_array(struct agp_memory *mem)
 103{
 104	if (is_vmalloc_addr(mem->pages)) {
 105		vfree(mem->pages);
 106	} else {
 107		kfree(mem->pages);
 108	}
 109}
 110EXPORT_SYMBOL(agp_free_page_array);
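/*
 * Illustrative sketch, not part of the original file: how the two helpers
 * above are typically paired.  Array requests of at most two pages
 * (2*PAGE_SIZE bytes) are tried with kmalloc first; larger requests, or a
 * failed kmalloc, fall back to vmalloc, and agp_free_page_array() picks the
 * matching release path by testing the pointer with is_vmalloc_addr().
 * nr_pages below is just a placeholder count.
 *
 *	struct agp_memory *mem;		// assumed already kzalloc'd
 *
 *	agp_alloc_page_array(nr_pages * sizeof(struct page *), mem);
 *	if (mem->pages == NULL)
 *		return -ENOMEM;
 *	...
 *	agp_free_page_array(mem);
 */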
 111
 112
 113static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
 114{
 115	struct agp_memory *new;
 116	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
 117
 118	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
 119		return NULL;
 120
 121	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
 122	if (new == NULL)
 123		return NULL;
 124
 125	new->key = agp_get_key();
 126
 127	if (new->key < 0) {
 128		kfree(new);
 129		return NULL;
 130	}
 131
 132	agp_alloc_page_array(alloc_size, new);
 133
 134	if (new->pages == NULL) {
 135		agp_free_key(new->key);
 136		kfree(new);
 137		return NULL;
 138	}
 139	new->num_scratch_pages = 0;
 140	return new;
 141}
 142
 143struct agp_memory *agp_create_memory(int scratch_pages)
 144{
 145	struct agp_memory *new;
 146
 147	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
 148	if (new == NULL)
 149		return NULL;
 150
 151	new->key = agp_get_key();
 152
 153	if (new->key < 0) {
 154		kfree(new);
 155		return NULL;
 156	}
 157
 158	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
 159
 160	if (new->pages == NULL) {
 161		agp_free_key(new->key);
 162		kfree(new);
 163		return NULL;
 164	}
 165	new->num_scratch_pages = scratch_pages;
 166	new->type = AGP_NORMAL_MEMORY;
 167	return new;
 168}
 169EXPORT_SYMBOL(agp_create_memory);
 170
 171/**
 172 *	agp_free_memory - free memory associated with an agp_memory pointer.
 173 *
 174 *	@curr:		agp_memory pointer to be freed.
 175 *
 176 *	It is the only function that can be called when the backend is not owned
 177 *	by the caller.  (So it can free memory on client death.)
 178 */
 179void agp_free_memory(struct agp_memory *curr)
 180{
 181	size_t i;
 182
 183	if (curr == NULL)
 184		return;
 185
 186	if (curr->is_bound)
 187		agp_unbind_memory(curr);
 188
 189	if (curr->type >= AGP_USER_TYPES) {
 190		agp_generic_free_by_type(curr);
 191		return;
 192	}
 193
 194	if (curr->type != 0) {
 195		curr->bridge->driver->free_by_type(curr);
 196		return;
 197	}
 198	if (curr->page_count != 0) {
 199		if (curr->bridge->driver->agp_destroy_pages) {
 200			curr->bridge->driver->agp_destroy_pages(curr);
 201		} else {
 202
 203			for (i = 0; i < curr->page_count; i++) {
 204				curr->bridge->driver->agp_destroy_page(
 205					curr->pages[i],
 206					AGP_PAGE_DESTROY_UNMAP);
 207			}
 208			for (i = 0; i < curr->page_count; i++) {
 209				curr->bridge->driver->agp_destroy_page(
 210					curr->pages[i],
 211					AGP_PAGE_DESTROY_FREE);
 212			}
 213		}
 214	}
 215	agp_free_key(curr->key);
 216	agp_free_page_array(curr);
 217	kfree(curr);
 218}
 219EXPORT_SYMBOL(agp_free_memory);
 220
 221#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
 222
 223/**
 224 *	agp_allocate_memory  -  allocate a group of pages of a certain type.
 225 *
 226 *	@page_count:	size_t argument of the number of pages
 227 *	@type:	u32 argument of the type of memory to be allocated.
 228 *
 229 *	Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 230 *	maps to physical ram.  Any other type is device dependent.
 231 *
 232 *	It returns NULL whenever memory is unavailable.
 233 */
 234struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 235					size_t page_count, u32 type)
 236{
 237	int scratch_pages;
 238	struct agp_memory *new;
 239	size_t i;
 240	int cur_memory;
 241
 242	if (!bridge)
 243		return NULL;
 244
 245	cur_memory = atomic_read(&bridge->current_memory_agp);
 246	if ((cur_memory + page_count > bridge->max_memory_agp) ||
 247	    (cur_memory + page_count < page_count))
 248		return NULL;
 249
 250	if (type >= AGP_USER_TYPES) {
 251		new = agp_generic_alloc_user(page_count, type);
 252		if (new)
 253			new->bridge = bridge;
 254		return new;
 255	}
 256
 257	if (type != 0) {
 258		new = bridge->driver->alloc_by_type(page_count, type);
 259		if (new)
 260			new->bridge = bridge;
 261		return new;
 262	}
 263
 264	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
 265
 266	new = agp_create_memory(scratch_pages);
 267
 268	if (new == NULL)
 269		return NULL;
 270
 271	if (bridge->driver->agp_alloc_pages) {
 272		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
 273			agp_free_memory(new);
 274			return NULL;
 275		}
 276		new->bridge = bridge;
 277		return new;
 278	}
 279
 280	for (i = 0; i < page_count; i++) {
 281		struct page *page = bridge->driver->agp_alloc_page(bridge);
 282
 283		if (page == NULL) {
 284			agp_free_memory(new);
 285			return NULL;
 286		}
 287		new->pages[i] = page;
 288		new->page_count++;
 289	}
 290	new->bridge = bridge;
 291
 292	return new;
 293}
 294EXPORT_SYMBOL(agp_allocate_memory);
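/*
 * Illustrative sketch, not part of the original file: minimal use of
 * agp_allocate_memory()/agp_free_memory() by a hypothetical in-kernel
 * client.  The bridge pointer is assumed to have been obtained elsewhere
 * (typically via agp_backend_acquire(), which lives outside this file).
 *
 *	struct agp_memory *mem;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (mem == NULL)
 *		return -ENOMEM;		// over the limit or out of pages
 *	...
 *	agp_free_memory(mem);		// also unbinds if still bound
 */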
 295
 296
 297/* End - Generic routines for handling agp_memory structures */
 298
 299
 300static int agp_return_size(void)
 301{
 302	int current_size;
 303	void *temp;
 304
 305	temp = agp_bridge->current_size;
 306
 307	switch (agp_bridge->driver->size_type) {
 308	case U8_APER_SIZE:
 309		current_size = A_SIZE_8(temp)->size;
 310		break;
 311	case U16_APER_SIZE:
 312		current_size = A_SIZE_16(temp)->size;
 313		break;
 314	case U32_APER_SIZE:
 315		current_size = A_SIZE_32(temp)->size;
 316		break;
 317	case LVL2_APER_SIZE:
 318		current_size = A_SIZE_LVL2(temp)->size;
 319		break;
 320	case FIXED_APER_SIZE:
 321		current_size = A_SIZE_FIX(temp)->size;
 322		break;
 323	default:
 324		current_size = 0;
 325		break;
 326	}
 327
 328	current_size -= (agp_memory_reserved / (1024*1024));
  329	if (current_size < 0)
 330		current_size = 0;
 331	return current_size;
 332}
 333
 334
 335int agp_num_entries(void)
 336{
 337	int num_entries;
 338	void *temp;
 339
 340	temp = agp_bridge->current_size;
 341
 342	switch (agp_bridge->driver->size_type) {
 343	case U8_APER_SIZE:
 344		num_entries = A_SIZE_8(temp)->num_entries;
 345		break;
 346	case U16_APER_SIZE:
 347		num_entries = A_SIZE_16(temp)->num_entries;
 348		break;
 349	case U32_APER_SIZE:
 350		num_entries = A_SIZE_32(temp)->num_entries;
 351		break;
 352	case LVL2_APER_SIZE:
 353		num_entries = A_SIZE_LVL2(temp)->num_entries;
 354		break;
 355	case FIXED_APER_SIZE:
 356		num_entries = A_SIZE_FIX(temp)->num_entries;
 357		break;
 358	default:
 359		num_entries = 0;
 360		break;
 361	}
 362
  363	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
  364	if (num_entries < 0)
 365		num_entries = 0;
 366	return num_entries;
 367}
 368EXPORT_SYMBOL_GPL(agp_num_entries);
 369
 370
 371/**
 372 *	agp_copy_info  -  copy bridge state information
 373 *
  374 *	@info:		agp_kern_info pointer.  The caller should ensure that this pointer is valid.
 375 *
 376 *	This function copies information about the agp bridge device and the state of
 377 *	the agp backend into an agp_kern_info pointer.
 378 */
 379int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
 380{
 381	memset(info, 0, sizeof(struct agp_kern_info));
 382	if (!bridge) {
 383		info->chipset = NOT_SUPPORTED;
 384		return -EIO;
 385	}
 386
 387	info->version.major = bridge->version->major;
 388	info->version.minor = bridge->version->minor;
 389	info->chipset = SUPPORTED;
 390	info->device = bridge->dev;
 391	if (bridge->mode & AGPSTAT_MODE_3_0)
 392		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
 393	else
 394		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
 395	info->aper_base = bridge->gart_bus_addr;
 396	info->aper_size = agp_return_size();
 397	info->max_memory = bridge->max_memory_agp;
 398	info->current_memory = atomic_read(&bridge->current_memory_agp);
 399	info->cant_use_aperture = bridge->driver->cant_use_aperture;
 400	info->vm_ops = bridge->vm_ops;
 401	info->page_mask = ~0UL;
 402	return 0;
 403}
 404EXPORT_SYMBOL(agp_copy_info);
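/*
 * Illustrative sketch, not part of the original file: querying bridge state
 * through agp_copy_info().  On failure the structure is zeroed, info.chipset
 * is set to NOT_SUPPORTED and -EIO is returned.
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info))
 *		return -ENODEV;		// no usable AGP bridge
 *
 *	pr_info("AGP aperture at %#lx, %zu MB\n",
 *		info.aper_base, info.aper_size);
 */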
 405
 406/* End - Routine to copy over information structure */
 407
 408/*
 409 * Routines for handling swapping of agp_memory into the GATT -
 410 * These routines take agp_memory and insert them into the GATT.
 411 * They call device specific routines to actually write to the GATT.
 412 */
 413
 414/**
 415 *	agp_bind_memory  -  Bind an agp_memory structure into the GATT.
 416 *
 417 *	@curr:		agp_memory pointer
 418 *	@pg_start:	an offset into the graphics aperture translation table
 419 *
 420 *	It returns -EINVAL if the pointer == NULL.
 421 *	It returns -EBUSY if the area of the table requested is already in use.
 422 */
 423int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 424{
 425	int ret_val;
 426
 427	if (curr == NULL)
 428		return -EINVAL;
 429
 430	if (curr->is_bound) {
 431		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
 432		return -EINVAL;
 433	}
 434	if (!curr->is_flushed) {
 435		curr->bridge->driver->cache_flush();
 436		curr->is_flushed = true;
 437	}
 438
 439	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 440
 441	if (ret_val != 0)
 442		return ret_val;
 443
 444	curr->is_bound = true;
 445	curr->pg_start = pg_start;
 446	spin_lock(&agp_bridge->mapped_lock);
 447	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
 448	spin_unlock(&agp_bridge->mapped_lock);
 449
 450	return 0;
 451}
 452EXPORT_SYMBOL(agp_bind_memory);
 453
 454
 455/**
 456 *	agp_unbind_memory  -  Removes an agp_memory structure from the GATT
 457 *
 458 * @curr:	agp_memory pointer to be removed from the GATT.
 459 *
 460 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 461 * the graphics aperture translation table or if the agp_memory pointer == NULL
 462 */
 463int agp_unbind_memory(struct agp_memory *curr)
 464{
 465	int ret_val;
 466
 467	if (curr == NULL)
 468		return -EINVAL;
 469
 470	if (!curr->is_bound) {
 471		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
 472		return -EINVAL;
 473	}
 474
 475	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
 476
 477	if (ret_val != 0)
 478		return ret_val;
 479
 480	curr->is_bound = false;
 481	curr->pg_start = 0;
 482	spin_lock(&curr->bridge->mapped_lock);
 483	list_del(&curr->mapped_list);
 484	spin_unlock(&curr->bridge->mapped_lock);
 485	return 0;
 486}
 487EXPORT_SYMBOL(agp_unbind_memory);
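/*
 * Illustrative sketch, not part of the original file: binding memory into
 * the GATT and releasing it again.  pg_start is an offset in aperture
 * pages; here it is assumed the first pages of the aperture are free.
 *
 *	if (agp_bind_memory(mem, 0))	// flushes caches on first bind
 *		goto out_free;
 *	...				// device may now use the aperture
 *	agp_unbind_memory(mem);
 * out_free:
 *	agp_free_memory(mem);
 */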
 488
 489
 490/* End - Routines for handling swapping of agp_memory into the GATT */
 491
 492
 493/* Generic Agp routines - Start */
 494static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
 495{
 496	u32 tmp;
 497
 498	if (*requested_mode & AGP2_RESERVED_MASK) {
 499		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
 500			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
 501		*requested_mode &= ~AGP2_RESERVED_MASK;
 502	}
 503
 504	/*
 505	 * Some dumb bridges are programmed to disobey the AGP2 spec.
 506	 * This is likely a BIOS misprogramming rather than poweron default, or
 507	 * it would be a lot more common.
 508	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
 509	 * AGPv2 spec 6.1.9 states:
 510	 *   The RATE field indicates the data transfer rates supported by this
 511	 *   device. A.G.P. devices must report all that apply.
 512	 * Fix them up as best we can.
 513	 */
 514	switch (*bridge_agpstat & 7) {
 515	case 4:
 516		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
  517		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
  518			"Fixing up support for x2 & x1\n");
 519		break;
 520	case 2:
 521		*bridge_agpstat |= AGPSTAT2_1X;
  522		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
  523			"Fixing up support for x1\n");
 524		break;
 525	default:
 526		break;
 527	}
 528
 529	/* Check the speed bits make sense. Only one should be set. */
 530	tmp = *requested_mode & 7;
 531	switch (tmp) {
 532		case 0:
 533			printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
 534			*requested_mode |= AGPSTAT2_1X;
 535			break;
 536		case 1:
 537		case 2:
 538			break;
 539		case 3:
 540			*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
 541			break;
 542		case 4:
 543			break;
 544		case 5:
 545		case 6:
 546		case 7:
 547			*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
 548			break;
 549	}
 550
 551	/* disable SBA if it's not supported */
 552	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
 553		*bridge_agpstat &= ~AGPSTAT_SBA;
 554
 555	/* Set rate */
 556	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
 557		*bridge_agpstat &= ~AGPSTAT2_4X;
 558
 559	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
 560		*bridge_agpstat &= ~AGPSTAT2_2X;
 561
 562	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
 563		*bridge_agpstat &= ~AGPSTAT2_1X;
 564
 565	/* Now we know what mode it should be, clear out the unwanted bits. */
 566	if (*bridge_agpstat & AGPSTAT2_4X)
 567		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */
 568
 569	if (*bridge_agpstat & AGPSTAT2_2X)
 570		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */
 571
 572	if (*bridge_agpstat & AGPSTAT2_1X)
 573		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */
 574
 575	/* Apply any errata. */
 576	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
 577		*bridge_agpstat &= ~AGPSTAT_FW;
 578
 579	if (agp_bridge->flags & AGP_ERRATA_SBA)
 580		*bridge_agpstat &= ~AGPSTAT_SBA;
 581
 582	if (agp_bridge->flags & AGP_ERRATA_1X) {
 583		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
 584		*bridge_agpstat |= AGPSTAT2_1X;
 585	}
 586
 587	/* If we've dropped down to 1X, disable fast writes. */
 588	if (*bridge_agpstat & AGPSTAT2_1X)
 589		*bridge_agpstat &= ~AGPSTAT_FW;
 590}
 591
 592/*
 593 * requested_mode = Mode requested by (typically) X.
 594 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 595 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 596 */
 597static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
 598{
 599	u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
 600	u32 tmp;
 601
 602	if (*requested_mode & AGP3_RESERVED_MASK) {
 603		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
 604			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
 605		*requested_mode &= ~AGP3_RESERVED_MASK;
 606	}
 607
 608	/* Check the speed bits make sense. */
 609	tmp = *requested_mode & 7;
 610	if (tmp == 0) {
 611		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
 612		*requested_mode |= AGPSTAT3_4X;
 613	}
 614	if (tmp >= 3) {
 615		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
 616		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
 617	}
 618
 619	/* ARQSZ - Set the value to the maximum one.
 620	 * Don't allow the mode register to override values. */
 621	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
 622		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
 623
 624	/* Calibration cycle.
 625	 * Don't allow the mode register to override values. */
 626	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
 627		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
 628
 629	/* SBA *must* be supported for AGP v3 */
 630	*bridge_agpstat |= AGPSTAT_SBA;
 631
 632	/*
 633	 * Set speed.
 634	 * Check for invalid speeds. This can happen when applications
  635	 * written before the AGP 3.0 standard pass AGP 2.x modes to AGP3 hardware.
 636	 */
 637	if (*requested_mode & AGPSTAT_MODE_3_0) {
 638		/*
  639		 * The caller doesn't know what it is doing. The bridge is in 3.0 mode
  640		 * and we have been passed a 3.0 mode, but with 2.x speed bits set.
 641		 * AGP2.x 4x -> AGP3.0 4x.
 642		 */
 643		if (*requested_mode & AGPSTAT2_4X) {
 644			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
 645						current->comm, *requested_mode);
 646			*requested_mode &= ~AGPSTAT2_4X;
 647			*requested_mode |= AGPSTAT3_4X;
 648		}
 649	} else {
 650		/*
 651		 * The caller doesn't know what they are doing. We are in 3.0 mode,
 652		 * but have been passed an AGP 2.x mode.
 653		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
 654		 */
 655		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
 656					current->comm, *requested_mode);
 657		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
 658		*requested_mode |= AGPSTAT3_4X;
 659	}
 660
 661	if (*requested_mode & AGPSTAT3_8X) {
 662		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
 663			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 664			*bridge_agpstat |= AGPSTAT3_4X;
 665			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
 666			return;
 667		}
 668		if (!(*vga_agpstat & AGPSTAT3_8X)) {
 669			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 670			*bridge_agpstat |= AGPSTAT3_4X;
 671			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
 672			return;
 673		}
 674		/* All set, bridge & device can do AGP x8*/
 675		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 676		goto done;
 677
 678	} else if (*requested_mode & AGPSTAT3_4X) {
 679		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 680		*bridge_agpstat |= AGPSTAT3_4X;
 681		goto done;
 682
 683	} else {
 684
 685		/*
  686		 * If no AGP mode was specified, see if both the graphics card
  687		 * and the bridge can do x8, and use it if they can.
  688		 * If not, fall back to x4 mode.
 689		 */
 690		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
 691			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
 692				"supported by bridge & card (x8).\n");
 693			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 694			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 695		} else {
 696			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
 697			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
 698				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
 699					*bridge_agpstat, origbridge);
 700				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 701				*bridge_agpstat |= AGPSTAT3_4X;
 702			}
 703			if (!(*vga_agpstat & AGPSTAT3_8X)) {
 704				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
 705					*vga_agpstat, origvga);
 706				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 707				*vga_agpstat |= AGPSTAT3_4X;
 708			}
 709		}
 710	}
 711
 712done:
 713	/* Apply any errata. */
 714	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
 715		*bridge_agpstat &= ~AGPSTAT_FW;
 716
 717	if (agp_bridge->flags & AGP_ERRATA_SBA)
 718		*bridge_agpstat &= ~AGPSTAT_SBA;
 719
 720	if (agp_bridge->flags & AGP_ERRATA_1X) {
 721		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
 722		*bridge_agpstat |= AGPSTAT2_1X;
 723	}
 724}
 725
 726
 727/**
 728 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 729 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 730 * @requested_mode: requested agp_stat from userspace (Typically from X)
 731 * @bridge_agpstat: current agp_stat from AGP bridge.
 732 *
 733 * This function will hunt for an AGP graphics card, and try to match
 734 * the requested mode to the capabilities of both the bridge and the card.
 735 */
 736u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
 737{
 738	struct pci_dev *device = NULL;
 739	u32 vga_agpstat;
 740	u8 cap_ptr;
 741
 742	for (;;) {
 743		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
 744		if (!device) {
 745			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
 746			return 0;
 747		}
 748		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
 749		if (cap_ptr)
 750			break;
 751	}
 752
 753	/*
  754	 * OK, here we have an AGP device. Disable impossible settings,
  755	 * and adjust the request queue depth to the minimum.
 756	 */
 757	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
 758
 759	/* adjust RQ depth */
 760	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
 761	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
 762		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
 763
 764	/* disable FW if it's not supported */
 765	if (!((bridge_agpstat & AGPSTAT_FW) &&
 766		 (vga_agpstat & AGPSTAT_FW) &&
 767		 (requested_mode & AGPSTAT_FW)))
 768		bridge_agpstat &= ~AGPSTAT_FW;
 769
 770	/* Check to see if we are operating in 3.0 mode */
 771	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
 772		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
 773	else
 774		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
 775
 776	pci_dev_put(device);
 777	return bridge_agpstat;
 778}
 779EXPORT_SYMBOL(agp_collect_device_status);
 780
 781
 782void agp_device_command(u32 bridge_agpstat, bool agp_v3)
 783{
 784	struct pci_dev *device = NULL;
 785	int mode;
 786
 787	mode = bridge_agpstat & 0x7;
 788	if (agp_v3)
 789		mode *= 4;
 790
 791	for_each_pci_dev(device) {
 792		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
 793		if (!agp)
 794			continue;
 795
 796		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
 797			 agp_v3 ? 3 : 2, mode);
 798		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
 799	}
 800}
 801EXPORT_SYMBOL(agp_device_command);
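/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver-specific ->agp_enable hook for an AGP 2.x chipset, built from the
 * two helpers above (agp_generic_enable() below does the same thing with
 * extra version handling):
 *
 *	static void example_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 *	{
 *		u32 command;
 *
 *		pci_read_config_dword(bridge->dev,
 *				      bridge->capndx + PCI_AGP_STATUS, &command);
 *		command = agp_collect_device_status(bridge, mode, command);
 *		command |= AGPSTAT_AGP_ENABLE;
 *		agp_device_command(command, false);	// false => AGP 2.x
 *	}
 */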
 802
 803
 804void get_agp_version(struct agp_bridge_data *bridge)
 805{
 806	u32 ncapid;
 807
 808	/* Exit early if already set by errata workarounds. */
 809	if (bridge->major_version != 0)
 810		return;
 811
 812	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
 813	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
 814	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
 815}
 816EXPORT_SYMBOL(get_agp_version);
 817
 818
 819void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 820{
 821	u32 bridge_agpstat, temp;
 822
 823	get_agp_version(agp_bridge);
 824
 825	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
 826		 agp_bridge->major_version, agp_bridge->minor_version);
 827
 828	pci_read_config_dword(agp_bridge->dev,
 829		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
 830
 831	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
 832	if (bridge_agpstat == 0)
 833		/* Something bad happened. FIXME: Return error code? */
 834		return;
 835
 836	bridge_agpstat |= AGPSTAT_AGP_ENABLE;
 837
 838	/* Do AGP version specific frobbing. */
 839	if (bridge->major_version >= 3) {
 840		if (bridge->mode & AGPSTAT_MODE_3_0) {
 841			/* If we have 3.5, we can do the isoch stuff. */
 842			if (bridge->minor_version >= 5)
 843				agp_3_5_enable(bridge);
 844			agp_device_command(bridge_agpstat, true);
 845			return;
 846		} else {
  847		    /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
  848		    bridge_agpstat &= ~(7<<10);
 849		    pci_read_config_dword(bridge->dev,
 850					bridge->capndx+AGPCTRL, &temp);
 851		    temp |= (1<<9);
 852		    pci_write_config_dword(bridge->dev,
 853					bridge->capndx+AGPCTRL, temp);
 854
 855		    dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
 856		}
 857	}
 858
 859	/* AGP v<3 */
 860	agp_device_command(bridge_agpstat, false);
 861}
 862EXPORT_SYMBOL(agp_generic_enable);
 863
 864
 865int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 866{
 867	char *table;
 868	char *table_end;
 869	int size;
 870	int page_order;
 871	int num_entries;
 872	int i;
 873	void *temp;
 874	struct page *page;
 875
 876	/* The generic routines can't handle 2 level gatt's */
 877	if (bridge->driver->size_type == LVL2_APER_SIZE)
 878		return -EINVAL;
 879
 880	table = NULL;
 881	i = bridge->aperture_size_idx;
 882	temp = bridge->current_size;
 883	size = page_order = num_entries = 0;
 884
 885	if (bridge->driver->size_type != FIXED_APER_SIZE) {
 886		do {
 887			switch (bridge->driver->size_type) {
 888			case U8_APER_SIZE:
 889				size = A_SIZE_8(temp)->size;
 890				page_order =
 891				    A_SIZE_8(temp)->page_order;
 892				num_entries =
 893				    A_SIZE_8(temp)->num_entries;
 894				break;
 895			case U16_APER_SIZE:
 896				size = A_SIZE_16(temp)->size;
 897				page_order = A_SIZE_16(temp)->page_order;
 898				num_entries = A_SIZE_16(temp)->num_entries;
 899				break;
 900			case U32_APER_SIZE:
 901				size = A_SIZE_32(temp)->size;
 902				page_order = A_SIZE_32(temp)->page_order;
 903				num_entries = A_SIZE_32(temp)->num_entries;
 904				break;
 905				/* This case will never really happen. */
 906			case FIXED_APER_SIZE:
 907			case LVL2_APER_SIZE:
 908			default:
 909				size = page_order = num_entries = 0;
 910				break;
 911			}
 912
 913			table = alloc_gatt_pages(page_order);
 914
 915			if (table == NULL) {
 916				i++;
 917				switch (bridge->driver->size_type) {
 918				case U8_APER_SIZE:
 919					bridge->current_size = A_IDX8(bridge);
 920					break;
 921				case U16_APER_SIZE:
 922					bridge->current_size = A_IDX16(bridge);
 923					break;
 924				case U32_APER_SIZE:
 925					bridge->current_size = A_IDX32(bridge);
 926					break;
 927				/* These cases will never really happen. */
 928				case FIXED_APER_SIZE:
 929				case LVL2_APER_SIZE:
 930				default:
 931					break;
 932				}
 933				temp = bridge->current_size;
 934			} else {
 935				bridge->aperture_size_idx = i;
 936			}
 937		} while (!table && (i < bridge->driver->num_aperture_sizes));
 938	} else {
 939		size = ((struct aper_size_info_fixed *) temp)->size;
 940		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
 941		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
 942		table = alloc_gatt_pages(page_order);
 943	}
 944
 945	if (table == NULL)
 946		return -ENOMEM;
 947
 948	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
 949
 950	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
 951		SetPageReserved(page);
 952
 953	bridge->gatt_table_real = (u32 *) table;
 954	agp_gatt_table = (void *)table;
 955
 956	bridge->driver->cache_flush();
 957#ifdef CONFIG_X86
 958	if (set_memory_uc((unsigned long)table, 1 << page_order))
  959		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
 960
 961	bridge->gatt_table = (void *)table;
 962#else
 963	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
 964					(PAGE_SIZE * (1 << page_order)));
 965	bridge->driver->cache_flush();
 966#endif
 967
 968	if (bridge->gatt_table == NULL) {
 969		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
 970			ClearPageReserved(page);
 971
 972		free_gatt_pages(table, page_order);
 973
 974		return -ENOMEM;
 975	}
 976	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
 977
 978	/* AK: bogus, should encode addresses > 4GB */
 979	for (i = 0; i < num_entries; i++) {
 980		writel(bridge->scratch_page, bridge->gatt_table+i);
 981		readl(bridge->gatt_table+i);	/* PCI Posting. */
 982	}
 983
 984	return 0;
 985}
 986EXPORT_SYMBOL(agp_generic_create_gatt_table);
 987
 988int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
 989{
 990	int page_order;
 991	char *table, *table_end;
 992	void *temp;
 993	struct page *page;
 994
 995	temp = bridge->current_size;
 996
 997	switch (bridge->driver->size_type) {
 998	case U8_APER_SIZE:
 999		page_order = A_SIZE_8(temp)->page_order;
1000		break;
1001	case U16_APER_SIZE:
1002		page_order = A_SIZE_16(temp)->page_order;
1003		break;
1004	case U32_APER_SIZE:
1005		page_order = A_SIZE_32(temp)->page_order;
1006		break;
1007	case FIXED_APER_SIZE:
1008		page_order = A_SIZE_FIX(temp)->page_order;
1009		break;
1010	case LVL2_APER_SIZE:
1011		/* The generic routines can't deal with 2 level gatt's */
1012		return -EINVAL;
1013		break;
1014	default:
1015		page_order = 0;
1016		break;
1017	}
1018
1019	/* Do not worry about freeing memory, because if this is
1020	 * called, then all agp memory is deallocated and removed
1021	 * from the table. */
1022
1023#ifdef CONFIG_X86
1024	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
1025#else
1026	iounmap(bridge->gatt_table);
1027#endif
1028	table = (char *) bridge->gatt_table_real;
1029	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
1030
1031	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
1032		ClearPageReserved(page);
1033
1034	free_gatt_pages(bridge->gatt_table_real, page_order);
1035
1036	agp_gatt_table = NULL;
1037	bridge->gatt_table = NULL;
1038	bridge->gatt_table_real = NULL;
1039	bridge->gatt_bus_addr = 0;
1040
1041	return 0;
1042}
1043EXPORT_SYMBOL(agp_generic_free_gatt_table);
1044
1045
1046int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
1047{
1048	int num_entries;
1049	size_t i;
1050	off_t j;
1051	void *temp;
1052	struct agp_bridge_data *bridge;
1053	int mask_type;
1054
1055	bridge = mem->bridge;
1056	if (!bridge)
1057		return -EINVAL;
1058
1059	if (mem->page_count == 0)
1060		return 0;
1061
1062	temp = bridge->current_size;
1063
1064	switch (bridge->driver->size_type) {
1065	case U8_APER_SIZE:
1066		num_entries = A_SIZE_8(temp)->num_entries;
1067		break;
1068	case U16_APER_SIZE:
1069		num_entries = A_SIZE_16(temp)->num_entries;
1070		break;
1071	case U32_APER_SIZE:
1072		num_entries = A_SIZE_32(temp)->num_entries;
1073		break;
1074	case FIXED_APER_SIZE:
1075		num_entries = A_SIZE_FIX(temp)->num_entries;
1076		break;
1077	case LVL2_APER_SIZE:
1078		/* The generic routines can't deal with 2 level gatt's */
1079		return -EINVAL;
1080		break;
1081	default:
1082		num_entries = 0;
1083		break;
1084	}
1085
1086	num_entries -= agp_memory_reserved/PAGE_SIZE;
1087	if (num_entries < 0) num_entries = 0;
1088
1089	if (type != mem->type)
1090		return -EINVAL;
1091
1092	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
1093	if (mask_type != 0) {
1094		/* The generic routines know nothing of memory types */
1095		return -EINVAL;
1096	}
1097
1098	if (((pg_start + mem->page_count) > num_entries) ||
1099	    ((pg_start + mem->page_count) < pg_start))
1100		return -EINVAL;
1101
1102	j = pg_start;
1103
1104	while (j < (pg_start + mem->page_count)) {
1105		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
1106			return -EBUSY;
1107		j++;
1108	}
1109
1110	if (!mem->is_flushed) {
1111		bridge->driver->cache_flush();
1112		mem->is_flushed = true;
1113	}
1114
1115	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1116		writel(bridge->driver->mask_memory(bridge,
1117						   page_to_phys(mem->pages[i]),
1118						   mask_type),
1119		       bridge->gatt_table+j);
1120	}
1121	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
1122
1123	bridge->driver->tlb_flush(mem);
1124	return 0;
1125}
1126EXPORT_SYMBOL(agp_generic_insert_memory);
1127
1128
1129int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1130{
1131	size_t i;
1132	struct agp_bridge_data *bridge;
1133	int mask_type, num_entries;
1134
1135	bridge = mem->bridge;
1136	if (!bridge)
1137		return -EINVAL;
1138
1139	if (mem->page_count == 0)
1140		return 0;
1141
1142	if (type != mem->type)
1143		return -EINVAL;
1144
1145	num_entries = agp_num_entries();
1146	if (((pg_start + mem->page_count) > num_entries) ||
1147	    ((pg_start + mem->page_count) < pg_start))
1148		return -EINVAL;
1149
1150	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
1151	if (mask_type != 0) {
1152		/* The generic routines know nothing of memory types */
1153		return -EINVAL;
1154	}
1155
1156	/* AK: bogus, should encode addresses > 4GB */
1157	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1158		writel(bridge->scratch_page, bridge->gatt_table+i);
1159	}
1160	readl(bridge->gatt_table+i-1);	/* PCI Posting. */
1161
1162	bridge->driver->tlb_flush(mem);
1163	return 0;
1164}
1165EXPORT_SYMBOL(agp_generic_remove_memory);
1166
1167struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
1168{
1169	return NULL;
1170}
1171EXPORT_SYMBOL(agp_generic_alloc_by_type);
1172
1173void agp_generic_free_by_type(struct agp_memory *curr)
1174{
1175	agp_free_page_array(curr);
1176	agp_free_key(curr->key);
1177	kfree(curr);
1178}
1179EXPORT_SYMBOL(agp_generic_free_by_type);
1180
1181struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
1182{
1183	struct agp_memory *new;
1184	int i;
1185	int pages;
1186
1187	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
1188	new = agp_create_user_memory(page_count);
1189	if (new == NULL)
1190		return NULL;
1191
1192	for (i = 0; i < page_count; i++)
1193		new->pages[i] = NULL;
1194	new->page_count = 0;
1195	new->type = type;
1196	new->num_scratch_pages = pages;
1197
1198	return new;
1199}
1200EXPORT_SYMBOL(agp_generic_alloc_user);
1201
1202/*
1203 * Basic Page Allocation Routines -
1204 * These routines handle page allocation and by default they reserve the allocated
 1205 * memory.  They also handle incrementing the current_memory_agp value, which is checked
1206 * against a maximum value.
1207 */
1208
1209int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
1210{
1211	struct page * page;
1212	int i, ret = -ENOMEM;
1213
1214	for (i = 0; i < num_pages; i++) {
1215		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
1216		/* agp_free_memory() needs gart address */
1217		if (page == NULL)
1218			goto out;
1219
1220#ifndef CONFIG_X86
1221		map_page_into_agp(page);
1222#endif
1223		get_page(page);
1224		atomic_inc(&agp_bridge->current_memory_agp);
1225
1226		mem->pages[i] = page;
1227		mem->page_count++;
1228	}
1229
1230#ifdef CONFIG_X86
1231	set_pages_array_uc(mem->pages, num_pages);
1232#endif
1233	ret = 0;
1234out:
1235	return ret;
1236}
1237EXPORT_SYMBOL(agp_generic_alloc_pages);
1238
1239struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
1240{
1241	struct page * page;
1242
1243	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
1244	if (page == NULL)
1245		return NULL;
1246
1247	map_page_into_agp(page);
1248
1249	get_page(page);
1250	atomic_inc(&agp_bridge->current_memory_agp);
1251	return page;
1252}
1253EXPORT_SYMBOL(agp_generic_alloc_page);
1254
1255void agp_generic_destroy_pages(struct agp_memory *mem)
1256{
1257	int i;
1258	struct page *page;
1259
1260	if (!mem)
1261		return;
1262
1263#ifdef CONFIG_X86
1264	set_pages_array_wb(mem->pages, mem->page_count);
1265#endif
1266
1267	for (i = 0; i < mem->page_count; i++) {
1268		page = mem->pages[i];
1269
1270#ifndef CONFIG_X86
1271		unmap_page_from_agp(page);
1272#endif
1273		put_page(page);
1274		__free_page(page);
1275		atomic_dec(&agp_bridge->current_memory_agp);
1276		mem->pages[i] = NULL;
1277	}
1278}
1279EXPORT_SYMBOL(agp_generic_destroy_pages);
1280
1281void agp_generic_destroy_page(struct page *page, int flags)
1282{
1283	if (page == NULL)
1284		return;
1285
1286	if (flags & AGP_PAGE_DESTROY_UNMAP)
1287		unmap_page_from_agp(page);
1288
1289	if (flags & AGP_PAGE_DESTROY_FREE) {
1290		put_page(page);
1291		__free_page(page);
1292		atomic_dec(&agp_bridge->current_memory_agp);
1293	}
1294}
1295EXPORT_SYMBOL(agp_generic_destroy_page);
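/*
 * Illustrative sketch, not part of the original file: the two-phase destroy
 * protocol used by agp_free_memory() above through the driver's
 * agp_destroy_page hook.  Every page is first unmapped, and only then is
 * every page freed.
 *
 *	size_t i;
 *
 *	for (i = 0; i < mem->page_count; i++)
 *		agp_generic_destroy_page(mem->pages[i], AGP_PAGE_DESTROY_UNMAP);
 *	for (i = 0; i < mem->page_count; i++)
 *		agp_generic_destroy_page(mem->pages[i], AGP_PAGE_DESTROY_FREE);
 */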
1296
1297/* End Basic Page Allocation Routines */
1298
1299
1300/**
1301 * agp_enable  -  initialise the agp point-to-point connection.
1302 *
1303 * @mode:	agp mode register value to configure with.
1304 */
1305void agp_enable(struct agp_bridge_data *bridge, u32 mode)
1306{
1307	if (!bridge)
1308		return;
1309	bridge->driver->agp_enable(bridge, mode);
1310}
1311EXPORT_SYMBOL(agp_enable);
1312
1313/* When we remove the global variable agp_bridge from all drivers
1314 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
1315 */
1316
1317struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
1318{
1319	if (list_empty(&agp_bridges))
1320		return NULL;
1321
1322	return agp_bridge;
1323}
1324
1325static void ipi_handler(void *null)
1326{
1327	flush_agp_cache();
1328}
1329
1330void global_cache_flush(void)
1331{
1332	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
1333		panic(PFX "timed out waiting for the other CPUs!\n");
1334}
1335EXPORT_SYMBOL(global_cache_flush);
1336
1337unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
1338				      dma_addr_t addr, int type)
1339{
1340	/* memory type is ignored in the generic routine */
1341	if (bridge->driver->masks)
1342		return addr | bridge->driver->masks[0].mask;
1343	else
1344		return addr;
1345}
1346EXPORT_SYMBOL(agp_generic_mask_memory);
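/*
 * Illustrative sketch, not part of the original file: a hypothetical driver
 * that only needs a fixed "valid" bit in each GATT entry can point ->masks
 * at a one-element table and reuse the generic helper above, which simply
 * ORs masks[0].mask into the physical address:
 *
 *	static const struct gatt_mask example_masks[] = {
 *		{ .mask = 0x00000001, .type = 0 },
 *	};
 *	...
 *	.mask_memory	= agp_generic_mask_memory,
 *	.masks		= example_masks,
 */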
1347
1348int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
1349				  int type)
1350{
1351	if (type >= AGP_USER_TYPES)
1352		return 0;
1353	return type;
1354}
1355EXPORT_SYMBOL(agp_generic_type_to_mask_type);
1356
1357/*
1358 * These functions are implemented according to the AGPv3 spec,
1359 * which covers implementation details that had previously been
1360 * left open.
1361 */
1362
1363int agp3_generic_fetch_size(void)
1364{
1365	u16 temp_size;
1366	int i;
1367	struct aper_size_info_16 *values;
1368
1369	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
1370	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
1371
1372	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
1373		if (temp_size == values[i].size_value) {
1374			agp_bridge->previous_size =
1375				agp_bridge->current_size = (void *) (values + i);
1376
1377			agp_bridge->aperture_size_idx = i;
1378			return values[i].size;
1379		}
1380	}
1381	return 0;
1382}
1383EXPORT_SYMBOL(agp3_generic_fetch_size);
1384
1385void agp3_generic_tlbflush(struct agp_memory *mem)
1386{
1387	u32 ctrl;
1388	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1389	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
1390	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
1391}
1392EXPORT_SYMBOL(agp3_generic_tlbflush);
1393
1394int agp3_generic_configure(void)
1395{
1396	u32 temp;
1397	struct aper_size_info_16 *current_size;
1398
1399	current_size = A_SIZE_16(agp_bridge->current_size);
1400
1401	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
1402	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1403
1404	/* set aperture size */
1405	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
1406	/* set gart pointer */
1407	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
1408	/* enable aperture and GTLB */
1409	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
1410	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
1411	return 0;
1412}
1413EXPORT_SYMBOL(agp3_generic_configure);
1414
1415void agp3_generic_cleanup(void)
1416{
1417	u32 ctrl;
1418	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1419	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
1420}
1421EXPORT_SYMBOL(agp3_generic_cleanup);
1422
1423const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
1424{
1425	{4096, 1048576, 10,0x000},
1426	{2048,  524288, 9, 0x800},
1427	{1024,  262144, 8, 0xc00},
1428	{ 512,  131072, 7, 0xe00},
1429	{ 256,   65536, 6, 0xf00},
1430	{ 128,   32768, 5, 0xf20},
1431	{  64,   16384, 4, 0xf30},
1432	{  32,    8192, 3, 0xf38},
1433	{  16,    4096, 2, 0xf3c},
1434	{   8,    2048, 1, 0xf3e},
1435	{   4,    1024, 0, 0xf3f}
1436};
1437EXPORT_SYMBOL(agp3_generic_sizes);
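/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * AGP 3.0 chipset driver might wire the generic helpers and the size table
 * above into its struct agp_bridge_driver.  Only the generic/AGP3 fields
 * are shown; a real driver fills in its chipset-specific hooks as well.
 *
 *	static const struct agp_bridge_driver example_agp3_driver = {
 *		.owner			= THIS_MODULE,
 *		.aperture_sizes		= agp3_generic_sizes,
 *		.size_type		= U16_APER_SIZE,
 *		.num_aperture_sizes	= AGP_GENERIC_SIZES_ENTRIES,
 *		.configure		= agp3_generic_configure,
 *		.fetch_size		= agp3_generic_fetch_size,
 *		.cleanup		= agp3_generic_cleanup,
 *		.tlb_flush		= agp3_generic_tlbflush,
 *		.mask_memory		= agp_generic_mask_memory,
 *		.agp_enable		= agp_generic_enable,
 *		.cache_flush		= global_cache_flush,
 *		.create_gatt_table	= agp_generic_create_gatt_table,
 *		.free_gatt_table	= agp_generic_free_gatt_table,
 *		.insert_memory		= agp_generic_insert_memory,
 *		.remove_memory		= agp_generic_remove_memory,
 *		.alloc_by_type		= agp_generic_alloc_by_type,
 *		.free_by_type		= agp_generic_free_by_type,
 *		.agp_alloc_page		= agp_generic_alloc_page,
 *		.agp_destroy_page	= agp_generic_destroy_page,
 *		.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 *	};
 */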
1438
v3.15
   1/*
   2 * AGPGART driver.
   3 * Copyright (C) 2004 Silicon Graphics, Inc.
   4 * Copyright (C) 2002-2005 Dave Jones.
   5 * Copyright (C) 1999 Jeff Hartmann.
   6 * Copyright (C) 1999 Precision Insight, Inc.
   7 * Copyright (C) 1999 Xi Graphics, Inc.
   8 *
   9 * Permission is hereby granted, free of charge, to any person obtaining a
  10 * copy of this software and associated documentation files (the "Software"),
  11 * to deal in the Software without restriction, including without limitation
  12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13 * and/or sell copies of the Software, and to permit persons to whom the
  14 * Software is furnished to do so, subject to the following conditions:
  15 *
  16 * The above copyright notice and this permission notice shall be included
  17 * in all copies or substantial portions of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  22 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
  25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 * TODO:
  28 * - Allocate more than order 0 pages to avoid too much linear map splitting.
  29 */
  30#include <linux/module.h>
  31#include <linux/pci.h>
 
  32#include <linux/pagemap.h>
  33#include <linux/miscdevice.h>
  34#include <linux/pm.h>
  35#include <linux/agp_backend.h>
  36#include <linux/vmalloc.h>
  37#include <linux/dma-mapping.h>
  38#include <linux/mm.h>
  39#include <linux/sched.h>
  40#include <linux/slab.h>
  41#include <asm/io.h>
  42#include <asm/cacheflush.h>
  43#include <asm/pgtable.h>
  44#include "agp.h"
  45
  46__u32 *agp_gatt_table;
  47int agp_memory_reserved;
  48
  49/*
  50 * Needed by the Nforce GART driver for the time being. Would be
  51 * nice to do this some other way instead of needing this export.
  52 */
  53EXPORT_SYMBOL_GPL(agp_memory_reserved);
  54
  55/*
  56 * Generic routines for handling agp_memory structures -
  57 * They use the basic page allocation routines to do the brunt of the work.
  58 */
  59
  60void agp_free_key(int key)
  61{
  62	if (key < 0)
  63		return;
  64
  65	if (key < MAXKEY)
  66		clear_bit(key, agp_bridge->key_list);
  67}
  68EXPORT_SYMBOL(agp_free_key);
  69
  70
  71static int agp_get_key(void)
  72{
  73	int bit;
  74
  75	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
  76	if (bit < MAXKEY) {
  77		set_bit(bit, agp_bridge->key_list);
  78		return bit;
  79	}
  80	return -1;
  81}
  82
  83/*
  84 * Use kmalloc if possible for the page list. Otherwise fall back to
  85 * vmalloc. This speeds things up and also saves memory for small AGP
  86 * regions.
  87 */
  88
  89void agp_alloc_page_array(size_t size, struct agp_memory *mem)
  90{
  91	mem->pages = NULL;
  92
  93	if (size <= 2*PAGE_SIZE)
  94		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
  95	if (mem->pages == NULL) {
  96		mem->pages = vmalloc(size);
  97	}
  98}
  99EXPORT_SYMBOL(agp_alloc_page_array);
 100
 101void agp_free_page_array(struct agp_memory *mem)
 102{
 103	if (is_vmalloc_addr(mem->pages)) {
 104		vfree(mem->pages);
 105	} else {
 106		kfree(mem->pages);
 107	}
 108}
 109EXPORT_SYMBOL(agp_free_page_array);
 110
 111
 112static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
 113{
 114	struct agp_memory *new;
 115	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
 116
 117	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
 118		return NULL;
 119
 120	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
 121	if (new == NULL)
 122		return NULL;
 123
 124	new->key = agp_get_key();
 125
 126	if (new->key < 0) {
 127		kfree(new);
 128		return NULL;
 129	}
 130
 131	agp_alloc_page_array(alloc_size, new);
 132
 133	if (new->pages == NULL) {
 134		agp_free_key(new->key);
 135		kfree(new);
 136		return NULL;
 137	}
 138	new->num_scratch_pages = 0;
 139	return new;
 140}
 141
 142struct agp_memory *agp_create_memory(int scratch_pages)
 143{
 144	struct agp_memory *new;
 145
 146	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
 147	if (new == NULL)
 148		return NULL;
 149
 150	new->key = agp_get_key();
 151
 152	if (new->key < 0) {
 153		kfree(new);
 154		return NULL;
 155	}
 156
 157	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
 158
 159	if (new->pages == NULL) {
 160		agp_free_key(new->key);
 161		kfree(new);
 162		return NULL;
 163	}
 164	new->num_scratch_pages = scratch_pages;
 165	new->type = AGP_NORMAL_MEMORY;
 166	return new;
 167}
 168EXPORT_SYMBOL(agp_create_memory);
 169
 170/**
 171 *	agp_free_memory - free memory associated with an agp_memory pointer.
 172 *
 173 *	@curr:		agp_memory pointer to be freed.
 174 *
 175 *	It is the only function that can be called when the backend is not owned
 176 *	by the caller.  (So it can free memory on client death.)
 177 */
 178void agp_free_memory(struct agp_memory *curr)
 179{
 180	size_t i;
 181
 182	if (curr == NULL)
 183		return;
 184
 185	if (curr->is_bound)
 186		agp_unbind_memory(curr);
 187
 188	if (curr->type >= AGP_USER_TYPES) {
 189		agp_generic_free_by_type(curr);
 190		return;
 191	}
 192
 193	if (curr->type != 0) {
 194		curr->bridge->driver->free_by_type(curr);
 195		return;
 196	}
 197	if (curr->page_count != 0) {
 198		if (curr->bridge->driver->agp_destroy_pages) {
 199			curr->bridge->driver->agp_destroy_pages(curr);
 200		} else {
 201
 202			for (i = 0; i < curr->page_count; i++) {
 203				curr->bridge->driver->agp_destroy_page(
 204					curr->pages[i],
 205					AGP_PAGE_DESTROY_UNMAP);
 206			}
 207			for (i = 0; i < curr->page_count; i++) {
 208				curr->bridge->driver->agp_destroy_page(
 209					curr->pages[i],
 210					AGP_PAGE_DESTROY_FREE);
 211			}
 212		}
 213	}
 214	agp_free_key(curr->key);
 215	agp_free_page_array(curr);
 216	kfree(curr);
 217}
 218EXPORT_SYMBOL(agp_free_memory);
 219
 220#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
 221
 222/**
 223 *	agp_allocate_memory  -  allocate a group of pages of a certain type.
 224 *
 225 *	@page_count:	size_t argument of the number of pages
 226 *	@type:	u32 argument of the type of memory to be allocated.
 227 *
 228 *	Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 229 *	maps to physical ram.  Any other type is device dependent.
 230 *
 231 *	It returns NULL whenever memory is unavailable.
 232 */
 233struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 234					size_t page_count, u32 type)
 235{
 236	int scratch_pages;
 237	struct agp_memory *new;
 238	size_t i;
 239	int cur_memory;
 240
 241	if (!bridge)
 242		return NULL;
 243
 244	cur_memory = atomic_read(&bridge->current_memory_agp);
 245	if ((cur_memory + page_count > bridge->max_memory_agp) ||
 246	    (cur_memory + page_count < page_count))
 247		return NULL;
 248
 249	if (type >= AGP_USER_TYPES) {
 250		new = agp_generic_alloc_user(page_count, type);
 251		if (new)
 252			new->bridge = bridge;
 253		return new;
 254	}
 255
 256	if (type != 0) {
 257		new = bridge->driver->alloc_by_type(page_count, type);
 258		if (new)
 259			new->bridge = bridge;
 260		return new;
 261	}
 262
 263	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
 264
 265	new = agp_create_memory(scratch_pages);
 266
 267	if (new == NULL)
 268		return NULL;
 269
 270	if (bridge->driver->agp_alloc_pages) {
 271		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
 272			agp_free_memory(new);
 273			return NULL;
 274		}
 275		new->bridge = bridge;
 276		return new;
 277	}
 278
 279	for (i = 0; i < page_count; i++) {
 280		struct page *page = bridge->driver->agp_alloc_page(bridge);
 281
 282		if (page == NULL) {
 283			agp_free_memory(new);
 284			return NULL;
 285		}
 286		new->pages[i] = page;
 287		new->page_count++;
 288	}
 289	new->bridge = bridge;
 290
 291	return new;
 292}
 293EXPORT_SYMBOL(agp_allocate_memory);
 294
 295
 296/* End - Generic routines for handling agp_memory structures */
 297
 298
 299static int agp_return_size(void)
 300{
 301	int current_size;
 302	void *temp;
 303
 304	temp = agp_bridge->current_size;
 305
 306	switch (agp_bridge->driver->size_type) {
 307	case U8_APER_SIZE:
 308		current_size = A_SIZE_8(temp)->size;
 309		break;
 310	case U16_APER_SIZE:
 311		current_size = A_SIZE_16(temp)->size;
 312		break;
 313	case U32_APER_SIZE:
 314		current_size = A_SIZE_32(temp)->size;
 315		break;
 316	case LVL2_APER_SIZE:
 317		current_size = A_SIZE_LVL2(temp)->size;
 318		break;
 319	case FIXED_APER_SIZE:
 320		current_size = A_SIZE_FIX(temp)->size;
 321		break;
 322	default:
 323		current_size = 0;
 324		break;
 325	}
 326
 327	current_size -= (agp_memory_reserved / (1024*1024));
 328	if (current_size <0)
 329		current_size = 0;
 330	return current_size;
 331}
 332
 333
 334int agp_num_entries(void)
 335{
 336	int num_entries;
 337	void *temp;
 338
 339	temp = agp_bridge->current_size;
 340
 341	switch (agp_bridge->driver->size_type) {
 342	case U8_APER_SIZE:
 343		num_entries = A_SIZE_8(temp)->num_entries;
 344		break;
 345	case U16_APER_SIZE:
 346		num_entries = A_SIZE_16(temp)->num_entries;
 347		break;
 348	case U32_APER_SIZE:
 349		num_entries = A_SIZE_32(temp)->num_entries;
 350		break;
 351	case LVL2_APER_SIZE:
 352		num_entries = A_SIZE_LVL2(temp)->num_entries;
 353		break;
 354	case FIXED_APER_SIZE:
 355		num_entries = A_SIZE_FIX(temp)->num_entries;
 356		break;
 357	default:
 358		num_entries = 0;
 359		break;
 360	}
 361
 362	num_entries -= agp_memory_reserved>>PAGE_SHIFT;
 363	if (num_entries<0)
 364		num_entries = 0;
 365	return num_entries;
 366}
 367EXPORT_SYMBOL_GPL(agp_num_entries);
 368
 369
 370/**
 371 *	agp_copy_info  -  copy bridge state information
 372 *
 373 *	@info:		agp_kern_info pointer.  The caller should insure that this pointer is valid.
 374 *
 375 *	This function copies information about the agp bridge device and the state of
 376 *	the agp backend into an agp_kern_info pointer.
 377 */
 378int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
 379{
 380	memset(info, 0, sizeof(struct agp_kern_info));
 381	if (!bridge) {
 382		info->chipset = NOT_SUPPORTED;
 383		return -EIO;
 384	}
 385
 386	info->version.major = bridge->version->major;
 387	info->version.minor = bridge->version->minor;
 388	info->chipset = SUPPORTED;
 389	info->device = bridge->dev;
 390	if (bridge->mode & AGPSTAT_MODE_3_0)
 391		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
 392	else
 393		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
 394	info->aper_base = bridge->gart_bus_addr;
 395	info->aper_size = agp_return_size();
 396	info->max_memory = bridge->max_memory_agp;
 397	info->current_memory = atomic_read(&bridge->current_memory_agp);
 398	info->cant_use_aperture = bridge->driver->cant_use_aperture;
 399	info->vm_ops = bridge->vm_ops;
 400	info->page_mask = ~0UL;
 401	return 0;
 402}
 403EXPORT_SYMBOL(agp_copy_info);
 404
 405/* End - Routine to copy over information structure */
 406
 407/*
 408 * Routines for handling swapping of agp_memory into the GATT -
 409 * These routines take agp_memory and insert them into the GATT.
 410 * They call device specific routines to actually write to the GATT.
 411 */
 412
 413/**
 414 *	agp_bind_memory  -  Bind an agp_memory structure into the GATT.
 415 *
 416 *	@curr:		agp_memory pointer
 417 *	@pg_start:	an offset into the graphics aperture translation table
 418 *
 419 *	It returns -EINVAL if the pointer == NULL.
 420 *	It returns -EBUSY if the area of the table requested is already in use.
 421 */
 422int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 423{
 424	int ret_val;
 425
 426	if (curr == NULL)
 427		return -EINVAL;
 428
 429	if (curr->is_bound) {
 430		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
 431		return -EINVAL;
 432	}
 433	if (!curr->is_flushed) {
 434		curr->bridge->driver->cache_flush();
 435		curr->is_flushed = true;
 436	}
 437
 438	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 439
 440	if (ret_val != 0)
 441		return ret_val;
 442
 443	curr->is_bound = true;
 444	curr->pg_start = pg_start;
 445	spin_lock(&agp_bridge->mapped_lock);
 446	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
 447	spin_unlock(&agp_bridge->mapped_lock);
 448
 449	return 0;
 450}
 451EXPORT_SYMBOL(agp_bind_memory);
 452
 453
 454/**
 455 *	agp_unbind_memory  -  Removes an agp_memory structure from the GATT
 456 *
 457 * @curr:	agp_memory pointer to be removed from the GATT.
 458 *
 459 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 460 * the graphics aperture translation table or if the agp_memory pointer == NULL
 461 */
 462int agp_unbind_memory(struct agp_memory *curr)
 463{
 464	int ret_val;
 465
 466	if (curr == NULL)
 467		return -EINVAL;
 468
 469	if (!curr->is_bound) {
 470		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
 471		return -EINVAL;
 472	}
 473
 474	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
 475
 476	if (ret_val != 0)
 477		return ret_val;
 478
 479	curr->is_bound = false;
 480	curr->pg_start = 0;
 481	spin_lock(&curr->bridge->mapped_lock);
 482	list_del(&curr->mapped_list);
 483	spin_unlock(&curr->bridge->mapped_lock);
 484	return 0;
 485}
 486EXPORT_SYMBOL(agp_unbind_memory);
 487
 488
 489/* End - Routines for handling swapping of agp_memory into the GATT */
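/*
 * Illustrative sketch (not part of this driver): the usual bind/unbind
 * lifecycle as seen by an in-kernel client.  agp_allocate_memory() and
 * agp_free_memory() are defined elsewhere in this file; the bridge pointer,
 * page count and GATT offset are assumptions made for the example.
 */
#if 0	/* example only */
static int example_map_pages(struct agp_bridge_data *bridge)
{
	struct agp_memory *mem;
	int ret;

	/* 16 pages of the default (normal) AGP memory type */
	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
	if (!mem)
		return -ENOMEM;

	/* Insert the pages into the GATT starting at offset 0 */
	ret = agp_bind_memory(mem, 0);
	if (ret) {
		agp_free_memory(mem);
		return ret;
	}

	/* ... the pages are now visible through the aperture ... */

	agp_unbind_memory(mem);
	agp_free_memory(mem);
	return 0;
}
#endif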
 490
 491
 492/* Generic Agp routines - Start */
 493static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
 494{
 495	u32 tmp;
 496
 497	if (*requested_mode & AGP2_RESERVED_MASK) {
 498		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
 499			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
 500		*requested_mode &= ~AGP2_RESERVED_MASK;
 501	}
 502
 503	/*
 504	 * Some dumb bridges are programmed to disobey the AGP2 spec.
 505	 * This is more likely BIOS misprogramming than a power-on default,
 506	 * or it would be a lot more common.
 507	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
 508	 * AGPv2 spec 6.1.9 states:
 509	 *   The RATE field indicates the data transfer rates supported by this
 510	 *   device. A.G.P. devices must report all that apply.
 511	 * Fix them up as best we can.
 512	 */
 513	switch (*bridge_agpstat & 7) {
 514	case 4:
 515		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
 516		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
 517			"Fixing up support for x2 & x1\n");
 518		break;
 519	case 2:
 520		*bridge_agpstat |= AGPSTAT2_1X;
 521		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
 522			"Fixing up support for x1\n");
 523		break;
 524	default:
 525		break;
 526	}
 527
 528	/* Check the speed bits make sense. Only one should be set. */
 529	tmp = *requested_mode & 7;
 530	switch (tmp) {
 531		case 0:
 532			printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
 533			*requested_mode |= AGPSTAT2_1X;
 534			break;
 535		case 1:
 536		case 2:
 537			break;
 538		case 3:
 539			*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
 540			break;
 541		case 4:
 542			break;
 543		case 5:
 544		case 6:
 545		case 7:
 546			*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
 547			break;
 548	}
 549
 550	/* disable SBA if it's not supported */
 551	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
 552		*bridge_agpstat &= ~AGPSTAT_SBA;
 553
 554	/* Set rate */
 555	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
 556		*bridge_agpstat &= ~AGPSTAT2_4X;
 557
 558	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
 559		*bridge_agpstat &= ~AGPSTAT2_2X;
 560
 561	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
 562		*bridge_agpstat &= ~AGPSTAT2_1X;
 563
 564	/* Now we know what mode it should be, clear out the unwanted bits. */
 565	if (*bridge_agpstat & AGPSTAT2_4X)
 566		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */
 567
 568	if (*bridge_agpstat & AGPSTAT2_2X)
 569		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */
 570
 571	if (*bridge_agpstat & AGPSTAT2_1X)
 572		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */
 573
 574	/* Apply any errata. */
 575	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
 576		*bridge_agpstat &= ~AGPSTAT_FW;
 577
 578	if (agp_bridge->flags & AGP_ERRATA_SBA)
 579		*bridge_agpstat &= ~AGPSTAT_SBA;
 580
 581	if (agp_bridge->flags & AGP_ERRATA_1X) {
 582		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
 583		*bridge_agpstat |= AGPSTAT2_1X;
 584	}
 585
 586	/* If we've dropped down to 1X, disable fast writes. */
 587	if (*bridge_agpstat & AGPSTAT2_1X)
 588		*bridge_agpstat &= ~AGPSTAT_FW;
 589}
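/*
 * The AGP2 RATE field (bits 2:0 of the status register) is a bitmask of
 * supported transfer rates: bit 0 = 1x, bit 1 = 2x, bit 2 = 4x.  Stripped
 * of the fixups above, the negotiation reduces to keeping the fastest rate
 * common to the bridge, the card and the request.  A minimal sketch of that
 * core step (the helper name is hypothetical):
 */
#if 0	/* example only */
static u32 example_agp2_pick_rate(u32 bridge_agpstat, u32 vga_agpstat,
				  u32 requested_mode)
{
	u32 common = bridge_agpstat & vga_agpstat & requested_mode & 7;

	if (common & AGPSTAT2_4X)
		return AGPSTAT2_4X;
	if (common & AGPSTAT2_2X)
		return AGPSTAT2_2X;
	/* fall back to 1x in the sketch; the real code relies on the fixups above */
	return AGPSTAT2_1X;
}
#endif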
 590
 591/*
 592 * requested_mode = Mode requested by (typically) X.
 593 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 594 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 595 */
 596static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
 597{
 598	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
 599	u32 tmp;
 600
 601	if (*requested_mode & AGP3_RESERVED_MASK) {
 602		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
 603			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
 604		*requested_mode &= ~AGP3_RESERVED_MASK;
 605	}
 606
 607	/* Check the speed bits make sense. */
 608	tmp = *requested_mode & 7;
 609	if (tmp == 0) {
 610		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
 611		*requested_mode |= AGPSTAT3_4X;
 612	}
 613	if (tmp >= 3) {
 614		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
 615		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
 616	}
 617
 618	/* ARQSZ - Set the value to the maximum one.
 619	 * Don't allow the mode register to override values. */
 620	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
 621		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
 622
 623	/* Calibration cycle.
 624	 * Don't allow the mode register to override values. */
 625	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
 626		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
 627
 628	/* SBA *must* be supported for AGP v3 */
 629	*bridge_agpstat |= AGPSTAT_SBA;
 630
 631	/*
 632	 * Set speed.
 633	 * Check for invalid speeds. This can happen when applications
 634	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
 635	 */
 636	if (*requested_mode & AGPSTAT_MODE_3_0) {
 637		/*
 638		 * The caller doesn't know what it is doing. The bridge is in 3.0
 639		 * mode and we have been passed a 3.0 mode, but with 2.x speed bits
 640		 * set. Convert AGP2.x 4x -> AGP3.0 4x.
 641		 */
 642		if (*requested_mode & AGPSTAT2_4X) {
 643			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
 644						current->comm, *requested_mode);
 645			*requested_mode &= ~AGPSTAT2_4X;
 646			*requested_mode |= AGPSTAT3_4X;
 647		}
 648	} else {
 649		/*
 650		 * The caller doesn't know what they are doing. We are in 3.0 mode,
 651		 * but have been passed an AGP 2.x mode.
 652		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
 653		 */
 654		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
 655					current->comm, *requested_mode);
 656		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
 657		*requested_mode |= AGPSTAT3_4X;
 658	}
 659
 660	if (*requested_mode & AGPSTAT3_8X) {
 661		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
 662			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 663			*bridge_agpstat |= AGPSTAT3_4X;
 664			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
 665			return;
 666		}
 667		if (!(*vga_agpstat & AGPSTAT3_8X)) {
 668			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 669			*bridge_agpstat |= AGPSTAT3_4X;
 670			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
 671			return;
 672		}
 673		/* All set, bridge & device can do AGP x8 */
 674		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 675		goto done;
 676
 677	} else if (*requested_mode & AGPSTAT3_4X) {
 678		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 679		*bridge_agpstat |= AGPSTAT3_4X;
 680		goto done;
 681
 682	} else {
 683
 684		/*
 685		 * If no AGP mode was specified, we check whether both
 686		 * the graphics card and the bridge can do x8, and use it if so.
 687		 * If not, we fall back to x4 mode.
 688		 */
 689		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
 690			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
 691				"supported by bridge & card (x8).\n");
 692			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 693			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 694		} else {
 695			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
 696			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
 697				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
 698					*bridge_agpstat, origbridge);
 699				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 700				*bridge_agpstat |= AGPSTAT3_4X;
 701			}
 702			if (!(*vga_agpstat & AGPSTAT3_8X)) {
 703				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
 704					*vga_agpstat, origvga);
 705				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
 706				*vga_agpstat |= AGPSTAT3_4X;
 707			}
 708		}
 709	}
 710
 711done:
 712	/* Apply any errata. */
 713	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
 714		*bridge_agpstat &= ~AGPSTAT_FW;
 715
 716	if (agp_bridge->flags & AGP_ERRATA_SBA)
 717		*bridge_agpstat &= ~AGPSTAT_SBA;
 718
 719	if (agp_bridge->flags & AGP_ERRATA_1X) {
 720		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
 721		*bridge_agpstat |= AGPSTAT2_1X;
 722	}
 723}
 724
 725
 726/**
 727 * agp_collect_device_status - determine the correct agp_cmd from the various agp_stat values
 728 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 729 * @requested_mode: requested agp_stat from userspace (typically from X)
 730 * @bridge_agpstat: current agp_stat from AGP bridge.
 731 *
 732 * This function will hunt for an AGP graphics card, and try to match
 733 * the requested mode to the capabilities of both the bridge and the card.
 734 */
 735u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
 736{
 737	struct pci_dev *device = NULL;
 738	u32 vga_agpstat;
 739	u8 cap_ptr;
 740
 741	for (;;) {
 742		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
 743		if (!device) {
 744			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
 745			return 0;
 746		}
 747		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
 748		if (cap_ptr)
 749			break;
 750	}
 751
 752	/*
 753	 * OK, here we have an AGP device. Disable impossible
 754	 * settings, and adjust the request queue (RQ) depth to the minimum.
 755	 */
 756	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
 757
 758	/* adjust RQ depth */
 759	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
 760	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
 761		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
 762
 763	/* disable FW if it's not supported */
 764	if (!((bridge_agpstat & AGPSTAT_FW) &&
 765		 (vga_agpstat & AGPSTAT_FW) &&
 766		 (requested_mode & AGPSTAT_FW)))
 767		bridge_agpstat &= ~AGPSTAT_FW;
 768
 769	/* Check to see if we are operating in 3.0 mode */
 770	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
 771		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
 772	else
 773		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
 774
 775	pci_dev_put(device);
 776	return bridge_agpstat;
 777}
 778EXPORT_SYMBOL(agp_collect_device_status);
 779
 780
 781void agp_device_command(u32 bridge_agpstat, bool agp_v3)
 782{
 783	struct pci_dev *device = NULL;
 784	int mode;
 785
 786	mode = bridge_agpstat & 0x7;
 787	if (agp_v3)
 788		mode *= 4;
 789
 790	for_each_pci_dev(device) {
 791		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
 792		if (!agp)
 793			continue;
 794
 795		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
 796			 agp_v3 ? 3 : 2, mode);
 797		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
 798	}
 799}
 800EXPORT_SYMBOL(agp_device_command);
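/*
 * The two routines above are normally used as a pair; agp_generic_enable()
 * below is the canonical user.  A minimal sketch of that sequence, with the
 * AGP3-specific frobbing left out (the function name is hypothetical):
 */
#if 0	/* example only */
static void example_enable_agp(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat;

	pci_read_config_dword(bridge->dev, bridge->capndx + PCI_AGP_STATUS,
			      &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(bridge, requested_mode,
						   bridge_agpstat);
	if (bridge_agpstat == 0)
		return;		/* no AGP VGA device was found */

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;
	agp_device_command(bridge_agpstat, bridge->mode & AGPSTAT_MODE_3_0);
}
#endif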
 801
 802
 803void get_agp_version(struct agp_bridge_data *bridge)
 804{
 805	u32 ncapid;
 806
 807	/* Exit early if already set by errata workarounds. */
 808	if (bridge->major_version != 0)
 809		return;
 810
 811	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
 812	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
 813	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
 814}
 815EXPORT_SYMBOL(get_agp_version);
 816
 817
 818void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 819{
 820	u32 bridge_agpstat, temp;
 821
 822	get_agp_version(agp_bridge);
 823
 824	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
 825		 agp_bridge->major_version, agp_bridge->minor_version);
 826
 827	pci_read_config_dword(agp_bridge->dev,
 828		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
 829
 830	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
 831	if (bridge_agpstat == 0)
 832		/* Something bad happened. FIXME: Return error code? */
 833		return;
 834
 835	bridge_agpstat |= AGPSTAT_AGP_ENABLE;
 836
 837	/* Do AGP version specific frobbing. */
 838	if (bridge->major_version >= 3) {
 839		if (bridge->mode & AGPSTAT_MODE_3_0) {
 840			/* If we have 3.5, we can do the isoch stuff. */
 841			if (bridge->minor_version >= 5)
 842				agp_3_5_enable(bridge);
 843			agp_device_command(bridge_agpstat, true);
 844			return;
 845		} else {
 846		    /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
 847		    bridge_agpstat &= ~(7<<10);
 848		    pci_read_config_dword(bridge->dev,
 849					bridge->capndx+AGPCTRL, &temp);
 850		    temp |= (1<<9);
 851		    pci_write_config_dword(bridge->dev,
 852					bridge->capndx+AGPCTRL, temp);
 853
 854		    dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
 855		}
 856	}
 857
 858	/* AGP v<3 */
 859	agp_device_command(bridge_agpstat, false);
 860}
 861EXPORT_SYMBOL(agp_generic_enable);
 862
 863
 864int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 865{
 866	char *table;
 867	char *table_end;
 868	int size;
 869	int page_order;
 870	int num_entries;
 871	int i;
 872	void *temp;
 873	struct page *page;
 874
 875	/* The generic routines can't handle 2-level GATTs */
 876	if (bridge->driver->size_type == LVL2_APER_SIZE)
 877		return -EINVAL;
 878
 879	table = NULL;
 880	i = bridge->aperture_size_idx;
 881	temp = bridge->current_size;
 882	size = page_order = num_entries = 0;
 883
 884	if (bridge->driver->size_type != FIXED_APER_SIZE) {
 885		do {
 886			switch (bridge->driver->size_type) {
 887			case U8_APER_SIZE:
 888				size = A_SIZE_8(temp)->size;
 889				page_order =
 890				    A_SIZE_8(temp)->page_order;
 891				num_entries =
 892				    A_SIZE_8(temp)->num_entries;
 893				break;
 894			case U16_APER_SIZE:
 895				size = A_SIZE_16(temp)->size;
 896				page_order = A_SIZE_16(temp)->page_order;
 897				num_entries = A_SIZE_16(temp)->num_entries;
 898				break;
 899			case U32_APER_SIZE:
 900				size = A_SIZE_32(temp)->size;
 901				page_order = A_SIZE_32(temp)->page_order;
 902				num_entries = A_SIZE_32(temp)->num_entries;
 903				break;
 904				/* This case will never really happen. */
 905			case FIXED_APER_SIZE:
 906			case LVL2_APER_SIZE:
 907			default:
 908				size = page_order = num_entries = 0;
 909				break;
 910			}
 911
 912			table = alloc_gatt_pages(page_order);
 913
 914			if (table == NULL) {
 915				i++;
 916				switch (bridge->driver->size_type) {
 917				case U8_APER_SIZE:
 918					bridge->current_size = A_IDX8(bridge);
 919					break;
 920				case U16_APER_SIZE:
 921					bridge->current_size = A_IDX16(bridge);
 922					break;
 923				case U32_APER_SIZE:
 924					bridge->current_size = A_IDX32(bridge);
 925					break;
 926				/* These cases will never really happen. */
 927				case FIXED_APER_SIZE:
 928				case LVL2_APER_SIZE:
 929				default:
 930					break;
 931				}
 932				temp = bridge->current_size;
 933			} else {
 934				bridge->aperture_size_idx = i;
 935			}
 936		} while (!table && (i < bridge->driver->num_aperture_sizes));
 937	} else {
 938		size = ((struct aper_size_info_fixed *) temp)->size;
 939		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
 940		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
 941		table = alloc_gatt_pages(page_order);
 942	}
 943
 944	if (table == NULL)
 945		return -ENOMEM;
 946
 947	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
 948
 949	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
 950		SetPageReserved(page);
 951
 952	bridge->gatt_table_real = (u32 *) table;
 953	agp_gatt_table = (void *)table;
 954
 955	bridge->driver->cache_flush();
 956#ifdef CONFIG_X86
 957	if (set_memory_uc((unsigned long)table, 1 << page_order))
 958		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
 959
 960	bridge->gatt_table = (u32 __iomem *)table;
 961#else
 962	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
 963					(PAGE_SIZE * (1 << page_order)));
 964	bridge->driver->cache_flush();
 965#endif
 966
 967	if (bridge->gatt_table == NULL) {
 968		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
 969			ClearPageReserved(page);
 970
 971		free_gatt_pages(table, page_order);
 972
 973		return -ENOMEM;
 974	}
 975	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
 976
 977	/* AK: bogus, should encode addresses > 4GB */
 978	for (i = 0; i < num_entries; i++) {
 979		writel(bridge->scratch_page, bridge->gatt_table+i);
 980		readl(bridge->gatt_table+i);	/* PCI Posting. */
 981	}
 982
 983	return 0;
 984}
 985EXPORT_SYMBOL(agp_generic_create_gatt_table);
 986
 987int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
 988{
 989	int page_order;
 990	char *table, *table_end;
 991	void *temp;
 992	struct page *page;
 993
 994	temp = bridge->current_size;
 995
 996	switch (bridge->driver->size_type) {
 997	case U8_APER_SIZE:
 998		page_order = A_SIZE_8(temp)->page_order;
 999		break;
1000	case U16_APER_SIZE:
1001		page_order = A_SIZE_16(temp)->page_order;
1002		break;
1003	case U32_APER_SIZE:
1004		page_order = A_SIZE_32(temp)->page_order;
1005		break;
1006	case FIXED_APER_SIZE:
1007		page_order = A_SIZE_FIX(temp)->page_order;
1008		break;
1009	case LVL2_APER_SIZE:
1010		/* The generic routines can't deal with 2-level GATTs */
1011		return -EINVAL;
1012	default:
1013		page_order = 0;
1014		break;
1015	}
1016
1017	/* Do not worry about freeing memory, because if this is
1018	 * called, then all agp memory is deallocated and removed
1019	 * from the table. */
1020
1021#ifdef CONFIG_X86
1022	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
1023#else
1024	iounmap(bridge->gatt_table);
1025#endif
1026	table = (char *) bridge->gatt_table_real;
1027	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
1028
1029	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
1030		ClearPageReserved(page);
1031
1032	free_gatt_pages(bridge->gatt_table_real, page_order);
1033
1034	agp_gatt_table = NULL;
1035	bridge->gatt_table = NULL;
1036	bridge->gatt_table_real = NULL;
1037	bridge->gatt_bus_addr = 0;
1038
1039	return 0;
1040}
1041EXPORT_SYMBOL(agp_generic_free_gatt_table);
1042
1043
1044int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
1045{
1046	int num_entries;
1047	size_t i;
1048	off_t j;
1049	void *temp;
1050	struct agp_bridge_data *bridge;
1051	int mask_type;
1052
1053	bridge = mem->bridge;
1054	if (!bridge)
1055		return -EINVAL;
1056
1057	if (mem->page_count == 0)
1058		return 0;
1059
1060	temp = bridge->current_size;
1061
1062	switch (bridge->driver->size_type) {
1063	case U8_APER_SIZE:
1064		num_entries = A_SIZE_8(temp)->num_entries;
1065		break;
1066	case U16_APER_SIZE:
1067		num_entries = A_SIZE_16(temp)->num_entries;
1068		break;
1069	case U32_APER_SIZE:
1070		num_entries = A_SIZE_32(temp)->num_entries;
1071		break;
1072	case FIXED_APER_SIZE:
1073		num_entries = A_SIZE_FIX(temp)->num_entries;
1074		break;
1075	case LVL2_APER_SIZE:
1076		/* The generic routines can't deal with 2-level GATTs */
1077		return -EINVAL;
1078	default:
1079		num_entries = 0;
1080		break;
1081	}
1082
1083	num_entries -= agp_memory_reserved/PAGE_SIZE;
1084	if (num_entries < 0) num_entries = 0;
1085
1086	if (type != mem->type)
1087		return -EINVAL;
1088
1089	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
1090	if (mask_type != 0) {
1091		/* The generic routines know nothing of memory types */
1092		return -EINVAL;
1093	}
1094
1095	if (((pg_start + mem->page_count) > num_entries) ||
1096	    ((pg_start + mem->page_count) < pg_start))
1097		return -EINVAL;
1098
1099	j = pg_start;
1100
1101	while (j < (pg_start + mem->page_count)) {
1102		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
1103			return -EBUSY;
1104		j++;
1105	}
1106
1107	if (!mem->is_flushed) {
1108		bridge->driver->cache_flush();
1109		mem->is_flushed = true;
1110	}
1111
1112	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1113		writel(bridge->driver->mask_memory(bridge,
1114						   page_to_phys(mem->pages[i]),
1115						   mask_type),
1116		       bridge->gatt_table+j);
1117	}
1118	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
1119
1120	bridge->driver->tlb_flush(mem);
1121	return 0;
1122}
1123EXPORT_SYMBOL(agp_generic_insert_memory);
1124
1125
1126int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1127{
1128	size_t i;
1129	struct agp_bridge_data *bridge;
1130	int mask_type, num_entries;
1131
1132	bridge = mem->bridge;
1133	if (!bridge)
1134		return -EINVAL;
1135
1136	if (mem->page_count == 0)
1137		return 0;
1138
1139	if (type != mem->type)
1140		return -EINVAL;
1141
1142	num_entries = agp_num_entries();
1143	if (((pg_start + mem->page_count) > num_entries) ||
1144	    ((pg_start + mem->page_count) < pg_start))
1145		return -EINVAL;
1146
1147	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
1148	if (mask_type != 0) {
1149		/* The generic routines know nothing of memory types */
1150		return -EINVAL;
1151	}
1152
1153	/* AK: bogus, should encode addresses > 4GB */
1154	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1155		writel(bridge->scratch_page, bridge->gatt_table+i);
1156	}
1157	readl(bridge->gatt_table+i-1);	/* PCI Posting. */
1158
1159	bridge->driver->tlb_flush(mem);
1160	return 0;
1161}
1162EXPORT_SYMBOL(agp_generic_remove_memory);
1163
1164struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
1165{
1166	return NULL;
1167}
1168EXPORT_SYMBOL(agp_generic_alloc_by_type);
1169
1170void agp_generic_free_by_type(struct agp_memory *curr)
1171{
1172	agp_free_page_array(curr);
1173	agp_free_key(curr->key);
1174	kfree(curr);
1175}
1176EXPORT_SYMBOL(agp_generic_free_by_type);
1177
1178struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
1179{
1180	struct agp_memory *new;
1181	int i;
1182	int pages;
1183
1184	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
1185	new = agp_create_user_memory(page_count);
1186	if (new == NULL)
1187		return NULL;
1188
1189	for (i = 0; i < page_count; i++)
1190		new->pages[i] = NULL;
1191	new->page_count = 0;
1192	new->type = type;
1193	new->num_scratch_pages = pages;
1194
1195	return new;
1196}
1197EXPORT_SYMBOL(agp_generic_alloc_user);
1198
1199/*
1200 * Basic Page Allocation Routines -
1201 * These routines handle page allocation and by default they reserve the allocated
1202 * memory.  They also handle incrementing the current_memory_agp value, which is checked
1203 * against a maximum value.
1204 */
1205
1206int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
1207{
1208	struct page * page;
1209	int i, ret = -ENOMEM;
1210
1211	for (i = 0; i < num_pages; i++) {
1212		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
1213		/* agp_free_memory() needs gart address */
1214		if (page == NULL)
1215			goto out;
1216
1217#ifndef CONFIG_X86
1218		map_page_into_agp(page);
1219#endif
1220		get_page(page);
1221		atomic_inc(&agp_bridge->current_memory_agp);
1222
1223		mem->pages[i] = page;
1224		mem->page_count++;
1225	}
1226
1227#ifdef CONFIG_X86
1228	set_pages_array_uc(mem->pages, num_pages);
1229#endif
1230	ret = 0;
1231out:
1232	return ret;
1233}
1234EXPORT_SYMBOL(agp_generic_alloc_pages);
1235
1236struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
1237{
1238	struct page * page;
1239
1240	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
1241	if (page == NULL)
1242		return NULL;
1243
1244	map_page_into_agp(page);
1245
1246	get_page(page);
1247	atomic_inc(&agp_bridge->current_memory_agp);
1248	return page;
1249}
1250EXPORT_SYMBOL(agp_generic_alloc_page);
1251
1252void agp_generic_destroy_pages(struct agp_memory *mem)
1253{
1254	int i;
1255	struct page *page;
1256
1257	if (!mem)
1258		return;
1259
1260#ifdef CONFIG_X86
1261	set_pages_array_wb(mem->pages, mem->page_count);
1262#endif
1263
1264	for (i = 0; i < mem->page_count; i++) {
1265		page = mem->pages[i];
1266
1267#ifndef CONFIG_X86
1268		unmap_page_from_agp(page);
1269#endif
1270		put_page(page);
1271		__free_page(page);
1272		atomic_dec(&agp_bridge->current_memory_agp);
1273		mem->pages[i] = NULL;
1274	}
1275}
1276EXPORT_SYMBOL(agp_generic_destroy_pages);
1277
1278void agp_generic_destroy_page(struct page *page, int flags)
1279{
1280	if (page == NULL)
1281		return;
1282
1283	if (flags & AGP_PAGE_DESTROY_UNMAP)
1284		unmap_page_from_agp(page);
1285
1286	if (flags & AGP_PAGE_DESTROY_FREE) {
1287		put_page(page);
1288		__free_page(page);
1289		atomic_dec(&agp_bridge->current_memory_agp);
1290	}
1291}
1292EXPORT_SYMBOL(agp_generic_destroy_page);
1293
1294/* End Basic Page Allocation Routines */
1295
1296
1297/**
1298 * agp_enable  -  initialise the agp point-to-point connection.
1299 *
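 * @bridge:	an agp_bridge_data struct allocated for the AGP host bridge.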
1300 * @mode:	agp mode register value to configure with.
1301 */
1302void agp_enable(struct agp_bridge_data *bridge, u32 mode)
1303{
1304	if (!bridge)
1305		return;
1306	bridge->driver->agp_enable(bridge, mode);
1307}
1308EXPORT_SYMBOL(agp_enable);
1309
1310/* When we remove the global variable agp_bridge from all drivers,
1311 * agp_alloc_bridge and agp_generic_find_bridge will need to be updated.
1312 */
1313
1314struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
1315{
1316	if (list_empty(&agp_bridges))
1317		return NULL;
1318
1319	return agp_bridge;
1320}
1321
1322static void ipi_handler(void *null)
1323{
1324	flush_agp_cache();
1325}
1326
1327void global_cache_flush(void)
1328{
1329	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
1330		panic(PFX "timed out waiting for the other CPUs!\n");
1331}
1332EXPORT_SYMBOL(global_cache_flush);
1333
1334unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
1335				      dma_addr_t addr, int type)
1336{
1337	/* memory type is ignored in the generic routine */
1338	if (bridge->driver->masks)
1339		return addr | bridge->driver->masks[0].mask;
1340	else
1341		return addr;
1342}
1343EXPORT_SYMBOL(agp_generic_mask_memory);
1344
1345int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
1346				  int type)
1347{
1348	if (type >= AGP_USER_TYPES)
1349		return 0;
1350	return type;
1351}
1352EXPORT_SYMBOL(agp_generic_type_to_mask_type);
1353
1354/*
1355 * These functions are implemented according to the AGPv3 spec,
1356 * which covers implementation details that had previously been
1357 * left open.
1358 */
1359
1360int agp3_generic_fetch_size(void)
1361{
1362	u16 temp_size;
1363	int i;
1364	struct aper_size_info_16 *values;
1365
1366	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
1367	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
1368
1369	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
1370		if (temp_size == values[i].size_value) {
1371			agp_bridge->previous_size =
1372				agp_bridge->current_size = (void *) (values + i);
1373
1374			agp_bridge->aperture_size_idx = i;
1375			return values[i].size;
1376		}
1377	}
1378	return 0;
1379}
1380EXPORT_SYMBOL(agp3_generic_fetch_size);
1381
1382void agp3_generic_tlbflush(struct agp_memory *mem)
1383{
1384	u32 ctrl;
1385	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1386	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
1387	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
1388}
1389EXPORT_SYMBOL(agp3_generic_tlbflush);
1390
1391int agp3_generic_configure(void)
1392{
1393	u32 temp;
1394	struct aper_size_info_16 *current_size;
1395
1396	current_size = A_SIZE_16(agp_bridge->current_size);
1397
1398	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
1399						    AGP_APERTURE_BAR);
1400
1401	/* set aperture size */
1402	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
1403	/* set gart pointer */
1404	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
1405	/* enable aperture and GTLB */
1406	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
1407	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
1408	return 0;
1409}
1410EXPORT_SYMBOL(agp3_generic_configure);
1411
1412void agp3_generic_cleanup(void)
1413{
1414	u32 ctrl;
1415	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1416	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
1417}
1418EXPORT_SYMBOL(agp3_generic_cleanup);
1419
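/*
 * Each entry below is { aperture size in MB, number of GATT entries,
 * GATT page order, value programmed into the AGPAPSIZE register }.
 * For example, a 256 MB aperture needs 65536 four-kilobyte entries,
 * i.e. an order-6 (64 page) GATT.
 */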
1420const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
1421{
1422	{4096, 1048576, 10,0x000},
1423	{2048,  524288, 9, 0x800},
1424	{1024,  262144, 8, 0xc00},
1425	{ 512,  131072, 7, 0xe00},
1426	{ 256,   65536, 6, 0xf00},
1427	{ 128,   32768, 5, 0xf20},
1428	{  64,   16384, 4, 0xf30},
1429	{  32,    8192, 3, 0xf38},
1430	{  16,    4096, 2, 0xf3c},
1431	{   8,    2048, 1, 0xf3e},
1432	{   4,    1024, 0, 0xf3f}
1433};
1434EXPORT_SYMBOL(agp3_generic_sizes);
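/*
 * Illustrative sketch (not part of this driver): how a chipset driver
 * typically plugs the generic helpers from this file into its
 * struct agp_bridge_driver (declared in "agp.h").  The driver name and the
 * idea of using the AGP3 generic hooks unchanged are assumptions for the
 * example; real drivers usually supply their own fetch_size/configure/
 * cleanup/tlb_flush implementations.
 */
#if 0	/* example only */
static const struct agp_bridge_driver example_agp3_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= agp3_generic_sizes,
	.size_type		= U16_APER_SIZE,
	.num_aperture_sizes	= AGP_GENERIC_SIZES_ENTRIES,
	.needs_scratch_page	= true,
	.fetch_size		= agp3_generic_fetch_size,
	.configure		= agp3_generic_configure,
	.cleanup		= agp3_generic_cleanup,
	.tlb_flush		= agp3_generic_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
	/* .masks left NULL; agp_generic_mask_memory() handles that case. */
};
#endif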
1435