/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
 * here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>
#include <asm/set_memory.h>

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1;
	unsigned int dma_mask_size : 8;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};

static struct _intel_private {
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_phys_addr;
	u32 PGETBL_save;
	u32 __iomem *gtt;		/* I915G */
	bool clear_fake_agp; /* on first access via agp, fill with scratch */
	int num_dcache_entries;
	void __iomem *i9xx_flush_page;
	char *i81x_gtt_table;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
	phys_addr_t scratch_page_dma;
	int refcount;
	/* Whether i915 needs to use the dmar apis or not. */
	unsigned int needs_dmar : 1;
	phys_addr_t gma_bus_addr;
	/* Size of memory reserved for graphics by the BIOS */
	resource_size_t stolen_size;
	/* Total number of gtt entries. */
	unsigned int gtt_total_entries;
	/* Part of the gtt that is mappable by the cpu, for those chips where
	 * this is not the full gtt. */
	unsigned int gtt_mappable_entries;
} intel_private;

#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake
#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_gtt_map_memory(struct page **pages,
				unsigned int num_entries,
				struct sg_table *st)
{
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)num_entries);

	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
		goto err;

	for_each_sg(st->sgl, sg, num_entries, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	if (!pci_map_sg(intel_private.pcidev,
			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
		goto err;

	return 0;

err:
	sg_free_table(st);
	return -ENOMEM;
}

static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
	struct sg_table st;
	DBG("try unmapping %lu pages\n", (unsigned long)num_sg);

	pci_unmap_sg(intel_private.pcidev, sg_list,
		     num_sg, PCI_DMA_BIDIRECTIONAL);

	st.sgl = sg_list;
	st.orig_nents = st.nents = num_sg;

	sg_free_table(&st);
}

static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
#endif

#define I810_GTT_ORDER 4
static int i810_setup(void)
{
	phys_addr_t reg_addr;
	char *gtt_table;

	/* i81x does not preallocate the gtt. It's always 64kb in size. */
	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
	if (gtt_table == NULL)
		return -ENOMEM;
	intel_private.i81x_gtt_table = gtt_table;

	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
	       intel_private.registers+I810_PGETBL_CTL);

	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;

	if ((readl(intel_private.registers+I810_DRAM_CTL)
	    & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}

	return 0;
}

static void i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
				      int type)
{
	int i;

	if ((pg_start + mem->page_count)
			> intel_private.num_dcache_entries)
		return -EINVAL;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
		dma_addr_t addr = i << PAGE_SHIFT;
		intel_private.driver->write_entry(addr,
						  i, type);
	}
	wmb();

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
#endif

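/* The scratch page backs every unused GTT entry (see intel_gtt_clear_range()),
 * so stray GPU accesses hit a harmless, uncached page instead of random
 * memory. */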
static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	set_pages_uc(page, 1);

	if (intel_private.needs_dmar) {
		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
			__free_page(page);
			return -EINVAL;
		}

		intel_private.scratch_page_dma = dma_addr;
	} else
		intel_private.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}

static void i810_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	switch (flags) {
	case AGP_DCACHE_MEMORY:
		pte_flags |= I810_PTE_LOCAL;
		break;
	case AGP_USER_CACHED_MEMORY:
		pte_flags |= I830_PTE_SYSTEM_CACHED;
		break;
	}

	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}

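/* Stolen memory is carved out of system RAM by the BIOS for the integrated
 * graphics; its size is encoded in the GMCH control register, with the
 * encoding differing between the early i830/845G parts and later chipsets. */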
static resource_size_t intel_gtt_stolen_size(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	resource_size_t stolen_size = 0;

	if (INTEL_GTT_GEN == 1)
		return 0; /* no stolen mem on i81x */

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %lluK %s memory\n",
			 (u64)stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
			 "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	return stolen_size;
}

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
	u32 pgetbl_ctl, pgetbl_ctl2;

	/* ensure that ppgtt is disabled */
	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

	/* write the new ggtt size */
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
	pgetbl_ctl |= size_flag;
	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}

static unsigned int i965_gtt_total_entries(void)
{
	int size;
	u32 pgetbl_ctl;
	u16 gmch_ctl;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);

	if (INTEL_GTT_GEN == 5) {
		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
		case G4x_GMCH_SIZE_1M:
		case G4x_GMCH_SIZE_VT_1M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
			break;
		case G4x_GMCH_SIZE_VT_1_5M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
			break;
		case G4x_GMCH_SIZE_2M:
		case G4x_GMCH_SIZE_VT_2M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
			break;
		}
	}

	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
	case I965_PGETBL_SIZE_128KB:
		size = KB(128);
		break;
	case I965_PGETBL_SIZE_256KB:
		size = KB(256);
		break;
	case I965_PGETBL_SIZE_512KB:
		size = KB(512);
		break;
	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
	case I965_PGETBL_SIZE_1MB:
		size = KB(1024);
		break;
	case I965_PGETBL_SIZE_2MB:
		size = KB(2048);
		break;
	case I965_PGETBL_SIZE_1_5MB:
		size = KB(1024 + 512);
		break;
	default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
		size = KB(512);
	}

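	/* Each GTT entry (PTE) is 4 bytes, so the entry count is the table
	 * size in bytes divided by 4. */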
	return size/4;
}

static unsigned int intel_gtt_total_entries(void)
{
	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
		return i965_gtt_total_entries();
	else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.gtt_mappable_entries;
	}
}

static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 1) {
		u32 smram_miscc;

		pci_read_config_dword(intel_private.bridge_dev,
				      I810_SMRAM_MISCC, &smram_miscc);

		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
				== I810_GFX_MEM_WIN_32M)
			aperture_size = MB(32);
		else
			aperture_size = MB(64);
	} else if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	if (intel_private.needs_dmar)
		pci_unmap_page(intel_private.pcidev,
			       intel_private.scratch_page_dma,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(intel_private.scratch_page);
}

static void intel_gtt_cleanup(void)
{
	intel_private.driver->cleanup();

	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
	const unsigned short gpu_devid = intel_private.pcidev->device;

	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
	     intel_iommu_gfx_mapped)
		return 1;
#endif
	return 0;
}

static bool intel_gtt_can_wc(void)
{
	if (INTEL_GTT_GEN <= 2)
		return false;

	if (INTEL_GTT_GEN >= 6)
		return false;

	/* Reports of major corruption with ILK vt'd enabled */
	if (needs_ilk_vtd_wa())
		return false;

	return true;
}

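/* Common init path: run the chipset's ->setup() hook, size the GTT, map the
 * PTE array (write-combined where that is known to be safe), record the
 * stolen size and install the scratch page. */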
static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret, bar;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.gtt_total_entries = intel_gtt_total_entries();

	/* save the PGETBL reg for resume */
	intel_private.PGETBL_save =
		readl(intel_private.registers+I810_PGETBL_CTL)
			& ~I810_PGETBL_ENABLED;
	/* we only ever restore the register when enabling the PGTBL... */
	if (HAS_PGTBL_EN)
		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
			"detected gtt size: %dK total, %dK mappable\n",
			intel_private.gtt_total_entries * 4,
			intel_private.gtt_mappable_entries * 4);

	gtt_map_size = intel_private.gtt_total_entries * 4;

	intel_private.gtt = NULL;
	if (intel_gtt_can_wc())
		intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
					       gtt_map_size);
	if (intel_private.gtt == NULL)
		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
					    gtt_map_size);
	if (intel_private.gtt == NULL) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

#if IS_ENABLED(CONFIG_AGP_INTEL)
	global_cache_flush();   /* FIXME: ? */
#endif

	intel_private.stolen_size = intel_gtt_stolen_size();

	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	if (INTEL_GTT_GEN <= 2)
		bar = I810_GMADR_BAR;
	else
		bar = I915_GMADR_BAR;

	intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
	return 0;
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
	{32, 8192, 3},
	{64, 16384, 4},
	{128, 32768, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};

static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}
#endif

static void i830_cleanup(void)
{
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
 */
static void i830_chipset_flush(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	/* Forcibly evict everything from the CPU write buffers.
	 * clflush appears to be insufficient.
	 */
	wbinvd_on_all_cpus();

	/* Now we've only seen documents for this magic bit on 855GM,
	 * we hope it exists for the other gen2 chipsets...
	 *
	 * Also works as advertised on my 845G.
	 */
	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
	       intel_private.registers+I830_HIC);

	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
		if (time_after(jiffies, timeout))
			break;

		udelay(50);
	}
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}

bool intel_enable_gtt(void)
{
	u8 __iomem *reg;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	/* On the resume path we may be adjusting the PGTBL value, so
	 * be paranoid and flush all chipset write buffers...
	 */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	return true;
}
EXPORT_SYMBOL(intel_enable_gtt);

static int i830_setup(void)
{
	phys_addr_t reg_addr;

	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;

	return 0;
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_fake_agp_configure(void)
{
	if (!intel_enable_gtt())
		return -EIO;

	intel_private.clear_fake_agp = true;
	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	return 0;
}
#endif

static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}

void intel_gtt_insert_page(dma_addr_t addr,
			   unsigned int pg,
			   unsigned int flags)
{
	intel_private.driver->write_entry(addr, pg, flags);
	readl(intel_private.gtt + pg);
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_insert_page);

void intel_gtt_insert_sg_entries(struct sg_table *st,
				 unsigned int pg_start,
				 unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(st->sgl, sg, st->nents, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr, j, flags);
			j++;
		}
	}
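	/* Posting read of the last PTE: make sure the relaxed PTE writes above
	 * have reached the hardware before any chipset flush. */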
	readl(intel_private.gtt + j - 1);
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);

#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gtt_insert_pages(unsigned int first_entry,
				   unsigned int num_entries,
				   struct page **pages,
				   unsigned int flags)
{
	int i, j;

	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
		dma_addr_t addr = page_to_phys(pages[i]);
		intel_private.driver->write_entry(addr,
						  j, flags);
	}
	wmb();
}

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int ret = -EINVAL;

	if (intel_private.clear_fake_agp) {
		int start = intel_private.stolen_size / PAGE_SIZE;
		int end = intel_private.gtt_mappable_entries;
		intel_gtt_clear_range(start, end - start);
		intel_private.clear_fake_agp = false;
	}

	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
		return i810_insert_dcache_entries(mem, pg_start, type);

	if (mem->page_count == 0)
		goto out;

	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (intel_private.needs_dmar) {
		struct sg_table st;

		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(&st, pg_start, type);
		mem->sg_list = st.sgl;
		mem->num_sg = st.nents;
	} else
		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
				       type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
#endif

void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
	unsigned int i;

	for (i = first_entry; i < (first_entry + num_entries); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	wmb();
}
EXPORT_SYMBOL(intel_gtt_clear_range);

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	if (mem->page_count == 0)
		return 0;

	intel_gtt_clear_range(pg_start, mem->page_count);

	if (intel_private.needs_dmar) {
		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
		mem->sg_list = NULL;
		mem->num_sg = 0;
	}

	return 0;
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
							int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}
#endif

static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);

	return ret;
}

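/* The flush page (IFP) is a page of bus address space whose location is held
 * in the bridge's IFPADDR register; writing to that page makes the GMCH flush
 * its internal write buffers (see i9xx_chipset_flush()). If the BIOS has not
 * already programmed one, allocate and program it here. */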
static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
	wmb();
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}

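/* Gen4+ PTEs are still 32 bits wide, so physical address bits 32-35 are
 * stored in PTE bits 4-7; that is what the shift below packs in. */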
static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags;

	pte_flags = I810_PTE_VALID;
	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}

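/* On gen3 the GTT sits behind its own PCI BAR; on gen4 it lives at a 512KB
 * offset into the MMIO BAR, and on gen5 (Ironlake) at a 2MB offset. */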
static int i9xx_setup(void)
{
	phys_addr_t reg_addr;
	int size = KB(512);

	reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, size);
	if (!intel_private.registers)
		return -ENOMEM;

	switch (INTEL_GTT_GEN) {
	case 3:
		intel_private.gtt_phys_addr =
			pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
		break;
	case 5:
		intel_private.gtt_phys_addr = reg_addr + MB(2);
		break;
	default:
		intel_private.gtt_phys_addr = reg_addr + KB(512);
		break;
	}

	intel_i9xx_setup_flush();

	return 0;
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
};
#endif

static const struct intel_gtt_driver i81x_gtt_driver = {
	.gen = 1,
	.has_pgtbl_enable = 1,
	.dma_mask_size = 32,
	.setup = i810_setup,
	.cleanup = i810_cleanup,
	.check_flags = i830_check_flags,
	.write_entry = i810_write_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.has_pgtbl_enable = 1,
	.setup = i830_setup,
	.cleanup = i830_cleanup,
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};

/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ 0, NULL, NULL }
};

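/* Look for the integrated graphics device matching @device; if the first
 * match is not PCI function 0, continue the search so we end up holding a
 * reference to the actual GMCH. */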
static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}

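/* Probe entry point used both by the fake agp driver (with a bridge) and
 * directly by drm/i915.ko (with a gpu_pdev); returns 1 on success, 0 on
 * failure. */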
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (gpu_pdev) {
			if (gpu_pdev->device ==
			    intel_gtt_chipsets[i].gmch_chip_id) {
				intel_private.pcidev = pci_dev_get(gpu_pdev);
				intel_private.driver =
					intel_gtt_chipsets[i].gtt_driver;

				break;
			}
		} else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!intel_private.driver)
		return 0;

#if IS_ENABLED(CONFIG_AGP_INTEL)
	if (bridge) {
		if (INTEL_GTT_GEN > 1)
			return 0;

		bridge->driver = &intel_fake_agp_driver;
		bridge->dev_private_data = &intel_private;
		bridge->dev = bridge_pdev;
	}
#endif

	/*
	 * Can be called from the fake agp driver but also directly from
	 * drm/i915.ko. Hence we need to check whether everything is set up
	 * already.
	 */
	if (intel_private.refcount++)
		return 1;

	intel_private.bridge_dev = pci_dev_get(bridge_pdev);

	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	if (bridge) {
		mask = intel_private.driver->dma_mask_size;
		if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
			dev_err(&intel_private.pcidev->dev,
				"set gfx device dma mask %d-bit failed!\n",
				mask);
		else
			pci_set_consistent_dma_mask(intel_private.pcidev,
						    DMA_BIT_MASK(mask));
	}

	if (intel_gtt_init() != 0) {
		intel_gmch_remove();

		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

void intel_gtt_get(u64 *gtt_total,
		   phys_addr_t *mappable_base,
		   resource_size_t *mappable_end)
{
	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
	*mappable_base = intel_private.gma_bus_addr;
	*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);

void intel_gtt_chipset_flush(void)
{
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);

void intel_gmch_remove(void)
{
	if (--intel_private.refcount)
		return;

	if (intel_private.scratch_page)
		intel_gtt_teardown_scratch_page();
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
	intel_private.driver = NULL;
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
1/*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * a agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphic devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
10 *
11 * With gem this does not make much sense anymore, just needlessly complicates
12 * the code. But as long as the old graphics stack is still support, it's stuck
13 * here.
14 *
15 * /fairy-tale-mode off
16 */
17
18#include <linux/module.h>
19#include <linux/pci.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/pagemap.h>
23#include <linux/agp_backend.h>
24#include <linux/delay.h>
25#include <asm/smp.h>
26#include "agp.h"
27#include "intel-agp.h"
28#include <drm/intel-gtt.h>
29
30/*
31 * If we have Intel graphics, we're not going to have anything other than
32 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
33 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
34 * Only newer chipsets need to bother with this, of course.
35 */
36#ifdef CONFIG_INTEL_IOMMU
37#define USE_PCI_DMA_API 1
38#else
39#define USE_PCI_DMA_API 0
40#endif
41
42struct intel_gtt_driver {
43 unsigned int gen : 8;
44 unsigned int is_g33 : 1;
45 unsigned int is_pineview : 1;
46 unsigned int is_ironlake : 1;
47 unsigned int has_pgtbl_enable : 1;
48 unsigned int dma_mask_size : 8;
49 /* Chipset specific GTT setup */
50 int (*setup)(void);
51 /* This should undo anything done in ->setup() save the unmapping
52 * of the mmio register file, that's done in the generic code. */
53 void (*cleanup)(void);
54 void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
55 /* Flags is a more or less chipset specific opaque value.
56 * For chipsets that need to support old ums (non-gem) code, this
57 * needs to be identical to the various supported agp memory types! */
58 bool (*check_flags)(unsigned int flags);
59 void (*chipset_flush)(void);
60};
61
62static struct _intel_private {
63 struct intel_gtt base;
64 const struct intel_gtt_driver *driver;
65 struct pci_dev *pcidev; /* device one */
66 struct pci_dev *bridge_dev;
67 u8 __iomem *registers;
68 phys_addr_t gtt_bus_addr;
69 phys_addr_t gma_bus_addr;
70 u32 PGETBL_save;
71 u32 __iomem *gtt; /* I915G */
72 bool clear_fake_agp; /* on first access via agp, fill with scratch */
73 int num_dcache_entries;
74 void __iomem *i9xx_flush_page;
75 char *i81x_gtt_table;
76 struct resource ifp_resource;
77 int resource_valid;
78 struct page *scratch_page;
79} intel_private;
80
81#define INTEL_GTT_GEN intel_private.driver->gen
82#define IS_G33 intel_private.driver->is_g33
83#define IS_PINEVIEW intel_private.driver->is_pineview
84#define IS_IRONLAKE intel_private.driver->is_ironlake
85#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
86
87int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
88 struct scatterlist **sg_list, int *num_sg)
89{
90 struct sg_table st;
91 struct scatterlist *sg;
92 int i;
93
94 if (*sg_list)
95 return 0; /* already mapped (for e.g. resume */
96
97 DBG("try mapping %lu pages\n", (unsigned long)num_entries);
98
99 if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
100 goto err;
101
102 *sg_list = sg = st.sgl;
103
104 for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
105 sg_set_page(sg, pages[i], PAGE_SIZE, 0);
106
107 *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
108 num_entries, PCI_DMA_BIDIRECTIONAL);
109 if (unlikely(!*num_sg))
110 goto err;
111
112 return 0;
113
114err:
115 sg_free_table(&st);
116 return -ENOMEM;
117}
118EXPORT_SYMBOL(intel_gtt_map_memory);
119
120void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
121{
122 struct sg_table st;
123 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
124
125 pci_unmap_sg(intel_private.pcidev, sg_list,
126 num_sg, PCI_DMA_BIDIRECTIONAL);
127
128 st.sgl = sg_list;
129 st.orig_nents = st.nents = num_sg;
130
131 sg_free_table(&st);
132}
133EXPORT_SYMBOL(intel_gtt_unmap_memory);
134
135static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
136{
137 return;
138}
139
140/* Exists to support ARGB cursors */
141static struct page *i8xx_alloc_pages(void)
142{
143 struct page *page;
144
145 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
146 if (page == NULL)
147 return NULL;
148
149 if (set_pages_uc(page, 4) < 0) {
150 set_pages_wb(page, 4);
151 __free_pages(page, 2);
152 return NULL;
153 }
154 get_page(page);
155 atomic_inc(&agp_bridge->current_memory_agp);
156 return page;
157}
158
159static void i8xx_destroy_pages(struct page *page)
160{
161 if (page == NULL)
162 return;
163
164 set_pages_wb(page, 4);
165 put_page(page);
166 __free_pages(page, 2);
167 atomic_dec(&agp_bridge->current_memory_agp);
168}
169
170#define I810_GTT_ORDER 4
171static int i810_setup(void)
172{
173 u32 reg_addr;
174 char *gtt_table;
175
176 /* i81x does not preallocate the gtt. It's always 64kb in size. */
177 gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
178 if (gtt_table == NULL)
179 return -ENOMEM;
180 intel_private.i81x_gtt_table = gtt_table;
181
182 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, ®_addr);
183 reg_addr &= 0xfff80000;
184
185 intel_private.registers = ioremap(reg_addr, KB(64));
186 if (!intel_private.registers)
187 return -ENOMEM;
188
189 writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
190 intel_private.registers+I810_PGETBL_CTL);
191
192 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
193
194 if ((readl(intel_private.registers+I810_DRAM_CTL)
195 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
196 dev_info(&intel_private.pcidev->dev,
197 "detected 4MB dedicated video ram\n");
198 intel_private.num_dcache_entries = 1024;
199 }
200
201 return 0;
202}
203
204static void i810_cleanup(void)
205{
206 writel(0, intel_private.registers+I810_PGETBL_CTL);
207 free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
208}
209
210static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
211 int type)
212{
213 int i;
214
215 if ((pg_start + mem->page_count)
216 > intel_private.num_dcache_entries)
217 return -EINVAL;
218
219 if (!mem->is_flushed)
220 global_cache_flush();
221
222 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
223 dma_addr_t addr = i << PAGE_SHIFT;
224 intel_private.driver->write_entry(addr,
225 i, type);
226 }
227 readl(intel_private.gtt+i-1);
228
229 return 0;
230}
231
232/*
233 * The i810/i830 requires a physical address to program its mouse
234 * pointer into hardware.
235 * However the Xserver still writes to it through the agp aperture.
236 */
237static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
238{
239 struct agp_memory *new;
240 struct page *page;
241
242 switch (pg_count) {
243 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
244 break;
245 case 4:
246 /* kludge to get 4 physical pages for ARGB cursor */
247 page = i8xx_alloc_pages();
248 break;
249 default:
250 return NULL;
251 }
252
253 if (page == NULL)
254 return NULL;
255
256 new = agp_create_memory(pg_count);
257 if (new == NULL)
258 return NULL;
259
260 new->pages[0] = page;
261 if (pg_count == 4) {
262 /* kludge to get 4 physical pages for ARGB cursor */
263 new->pages[1] = new->pages[0] + 1;
264 new->pages[2] = new->pages[1] + 1;
265 new->pages[3] = new->pages[2] + 1;
266 }
267 new->page_count = pg_count;
268 new->num_scratch_pages = pg_count;
269 new->type = AGP_PHYS_MEMORY;
270 new->physical = page_to_phys(new->pages[0]);
271 return new;
272}
273
274static void intel_i810_free_by_type(struct agp_memory *curr)
275{
276 agp_free_key(curr->key);
277 if (curr->type == AGP_PHYS_MEMORY) {
278 if (curr->page_count == 4)
279 i8xx_destroy_pages(curr->pages[0]);
280 else {
281 agp_bridge->driver->agp_destroy_page(curr->pages[0],
282 AGP_PAGE_DESTROY_UNMAP);
283 agp_bridge->driver->agp_destroy_page(curr->pages[0],
284 AGP_PAGE_DESTROY_FREE);
285 }
286 agp_free_page_array(curr);
287 }
288 kfree(curr);
289}
290
291static int intel_gtt_setup_scratch_page(void)
292{
293 struct page *page;
294 dma_addr_t dma_addr;
295
296 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
297 if (page == NULL)
298 return -ENOMEM;
299 get_page(page);
300 set_pages_uc(page, 1);
301
302 if (intel_private.base.needs_dmar) {
303 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
304 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
305 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
306 return -EINVAL;
307
308 intel_private.base.scratch_page_dma = dma_addr;
309 } else
310 intel_private.base.scratch_page_dma = page_to_phys(page);
311
312 intel_private.scratch_page = page;
313
314 return 0;
315}
316
317static void i810_write_entry(dma_addr_t addr, unsigned int entry,
318 unsigned int flags)
319{
320 u32 pte_flags = I810_PTE_VALID;
321
322 switch (flags) {
323 case AGP_DCACHE_MEMORY:
324 pte_flags |= I810_PTE_LOCAL;
325 break;
326 case AGP_USER_CACHED_MEMORY:
327 pte_flags |= I830_PTE_SYSTEM_CACHED;
328 break;
329 }
330
331 writel(addr | pte_flags, intel_private.gtt + entry);
332}
333
334static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
335 {32, 8192, 3},
336 {64, 16384, 4},
337 {128, 32768, 5},
338 {256, 65536, 6},
339 {512, 131072, 7},
340};
341
342static unsigned int intel_gtt_stolen_size(void)
343{
344 u16 gmch_ctrl;
345 u8 rdct;
346 int local = 0;
347 static const int ddt[4] = { 0, 16, 32, 64 };
348 unsigned int stolen_size = 0;
349
350 if (INTEL_GTT_GEN == 1)
351 return 0; /* no stolen mem on i81x */
352
353 pci_read_config_word(intel_private.bridge_dev,
354 I830_GMCH_CTRL, &gmch_ctrl);
355
356 if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
357 intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
358 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
359 case I830_GMCH_GMS_STOLEN_512:
360 stolen_size = KB(512);
361 break;
362 case I830_GMCH_GMS_STOLEN_1024:
363 stolen_size = MB(1);
364 break;
365 case I830_GMCH_GMS_STOLEN_8192:
366 stolen_size = MB(8);
367 break;
368 case I830_GMCH_GMS_LOCAL:
369 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
370 stolen_size = (I830_RDRAM_ND(rdct) + 1) *
371 MB(ddt[I830_RDRAM_DDT(rdct)]);
372 local = 1;
373 break;
374 default:
375 stolen_size = 0;
376 break;
377 }
378 } else if (INTEL_GTT_GEN == 6) {
379 /*
380 * SandyBridge has new memory control reg at 0x50.w
381 */
382 u16 snb_gmch_ctl;
383 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
384 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
385 case SNB_GMCH_GMS_STOLEN_32M:
386 stolen_size = MB(32);
387 break;
388 case SNB_GMCH_GMS_STOLEN_64M:
389 stolen_size = MB(64);
390 break;
391 case SNB_GMCH_GMS_STOLEN_96M:
392 stolen_size = MB(96);
393 break;
394 case SNB_GMCH_GMS_STOLEN_128M:
395 stolen_size = MB(128);
396 break;
397 case SNB_GMCH_GMS_STOLEN_160M:
398 stolen_size = MB(160);
399 break;
400 case SNB_GMCH_GMS_STOLEN_192M:
401 stolen_size = MB(192);
402 break;
403 case SNB_GMCH_GMS_STOLEN_224M:
404 stolen_size = MB(224);
405 break;
406 case SNB_GMCH_GMS_STOLEN_256M:
407 stolen_size = MB(256);
408 break;
409 case SNB_GMCH_GMS_STOLEN_288M:
410 stolen_size = MB(288);
411 break;
412 case SNB_GMCH_GMS_STOLEN_320M:
413 stolen_size = MB(320);
414 break;
415 case SNB_GMCH_GMS_STOLEN_352M:
416 stolen_size = MB(352);
417 break;
418 case SNB_GMCH_GMS_STOLEN_384M:
419 stolen_size = MB(384);
420 break;
421 case SNB_GMCH_GMS_STOLEN_416M:
422 stolen_size = MB(416);
423 break;
424 case SNB_GMCH_GMS_STOLEN_448M:
425 stolen_size = MB(448);
426 break;
427 case SNB_GMCH_GMS_STOLEN_480M:
428 stolen_size = MB(480);
429 break;
430 case SNB_GMCH_GMS_STOLEN_512M:
431 stolen_size = MB(512);
432 break;
433 }
434 } else {
435 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
436 case I855_GMCH_GMS_STOLEN_1M:
437 stolen_size = MB(1);
438 break;
439 case I855_GMCH_GMS_STOLEN_4M:
440 stolen_size = MB(4);
441 break;
442 case I855_GMCH_GMS_STOLEN_8M:
443 stolen_size = MB(8);
444 break;
445 case I855_GMCH_GMS_STOLEN_16M:
446 stolen_size = MB(16);
447 break;
448 case I855_GMCH_GMS_STOLEN_32M:
449 stolen_size = MB(32);
450 break;
451 case I915_GMCH_GMS_STOLEN_48M:
452 stolen_size = MB(48);
453 break;
454 case I915_GMCH_GMS_STOLEN_64M:
455 stolen_size = MB(64);
456 break;
457 case G33_GMCH_GMS_STOLEN_128M:
458 stolen_size = MB(128);
459 break;
460 case G33_GMCH_GMS_STOLEN_256M:
461 stolen_size = MB(256);
462 break;
463 case INTEL_GMCH_GMS_STOLEN_96M:
464 stolen_size = MB(96);
465 break;
466 case INTEL_GMCH_GMS_STOLEN_160M:
467 stolen_size = MB(160);
468 break;
469 case INTEL_GMCH_GMS_STOLEN_224M:
470 stolen_size = MB(224);
471 break;
472 case INTEL_GMCH_GMS_STOLEN_352M:
473 stolen_size = MB(352);
474 break;
475 default:
476 stolen_size = 0;
477 break;
478 }
479 }
480
481 if (stolen_size > 0) {
482 dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
483 stolen_size / KB(1), local ? "local" : "stolen");
484 } else {
485 dev_info(&intel_private.bridge_dev->dev,
486 "no pre-allocated video memory detected\n");
487 stolen_size = 0;
488 }
489
490 return stolen_size;
491}
492
493static void i965_adjust_pgetbl_size(unsigned int size_flag)
494{
495 u32 pgetbl_ctl, pgetbl_ctl2;
496
497 /* ensure that ppgtt is disabled */
498 pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
499 pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
500 writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
501
502 /* write the new ggtt size */
503 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
504 pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
505 pgetbl_ctl |= size_flag;
506 writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
507}
508
509static unsigned int i965_gtt_total_entries(void)
510{
511 int size;
512 u32 pgetbl_ctl;
513 u16 gmch_ctl;
514
515 pci_read_config_word(intel_private.bridge_dev,
516 I830_GMCH_CTRL, &gmch_ctl);
517
518 if (INTEL_GTT_GEN == 5) {
519 switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
520 case G4x_GMCH_SIZE_1M:
521 case G4x_GMCH_SIZE_VT_1M:
522 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
523 break;
524 case G4x_GMCH_SIZE_VT_1_5M:
525 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
526 break;
527 case G4x_GMCH_SIZE_2M:
528 case G4x_GMCH_SIZE_VT_2M:
529 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
530 break;
531 }
532 }
533
534 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
535
536 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
537 case I965_PGETBL_SIZE_128KB:
538 size = KB(128);
539 break;
540 case I965_PGETBL_SIZE_256KB:
541 size = KB(256);
542 break;
543 case I965_PGETBL_SIZE_512KB:
544 size = KB(512);
545 break;
546 /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
547 case I965_PGETBL_SIZE_1MB:
548 size = KB(1024);
549 break;
550 case I965_PGETBL_SIZE_2MB:
551 size = KB(2048);
552 break;
553 case I965_PGETBL_SIZE_1_5MB:
554 size = KB(1024 + 512);
555 break;
556 default:
557 dev_info(&intel_private.pcidev->dev,
558 "unknown page table size, assuming 512KB\n");
559 size = KB(512);
560 }
561
562 return size/4;
563}
564
565static unsigned int intel_gtt_total_entries(void)
566{
567 int size;
568
569 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
570 return i965_gtt_total_entries();
571 else if (INTEL_GTT_GEN == 6) {
572 u16 snb_gmch_ctl;
573
574 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
575 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
576 default:
577 case SNB_GTT_SIZE_0M:
578 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
579 size = MB(0);
580 break;
581 case SNB_GTT_SIZE_1M:
582 size = MB(1);
583 break;
584 case SNB_GTT_SIZE_2M:
585 size = MB(2);
586 break;
587 }
588 return size/4;
589 } else {
590 /* On previous hardware, the GTT size was just what was
591 * required to map the aperture.
592 */
593 return intel_private.base.gtt_mappable_entries;
594 }
595}
596
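/* CPU-mappable aperture size in pages: gen1 and gen2 encode it in chipset
 * config registers, everything newer simply reports it as the length of
 * PCI BAR 2. */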
597static unsigned int intel_gtt_mappable_entries(void)
598{
599 unsigned int aperture_size;
600
601 if (INTEL_GTT_GEN == 1) {
602 u32 smram_miscc;
603
604 pci_read_config_dword(intel_private.bridge_dev,
605 I810_SMRAM_MISCC, &smram_miscc);
606
607 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
608 == I810_GFX_MEM_WIN_32M)
609 aperture_size = MB(32);
610 else
611 aperture_size = MB(64);
612 } else if (INTEL_GTT_GEN == 2) {
613 u16 gmch_ctrl;
614
615 pci_read_config_word(intel_private.bridge_dev,
616 I830_GMCH_CTRL, &gmch_ctrl);
617
618 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
619 aperture_size = MB(64);
620 else
621 aperture_size = MB(128);
622 } else {
623 /* 9xx supports large sizes, just look at the length */
624 aperture_size = pci_resource_len(intel_private.pcidev, 2);
625 }
626
627 return aperture_size >> PAGE_SHIFT;
628}
629
630static void intel_gtt_teardown_scratch_page(void)
631{
632 set_pages_wb(intel_private.scratch_page, 1);
633 pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
634 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
635 put_page(intel_private.scratch_page);
636 __free_page(intel_private.scratch_page);
637}
638
639static void intel_gtt_cleanup(void)
640{
641 intel_private.driver->cleanup();
642
643 iounmap(intel_private.gtt);
644 iounmap(intel_private.registers);
645
646 intel_gtt_teardown_scratch_page();
647}
648
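/* Common initialization: run the chipset-specific setup, size the GTT,
 * save PGETBL_CTL for resume, ioremap the GTT itself (4 bytes per entry)
 * and install the scratch page. */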
649static int intel_gtt_init(void)
650{
651 u32 gtt_map_size;
652 int ret;
653
654 ret = intel_private.driver->setup();
655 if (ret != 0)
656 return ret;
657
658 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
659 intel_private.base.gtt_total_entries = intel_gtt_total_entries();
660
661 /* save the PGETBL reg for resume */
662 intel_private.PGETBL_save =
663 readl(intel_private.registers+I810_PGETBL_CTL)
664 & ~I810_PGETBL_ENABLED;
665 /* we only ever restore the register when enabling the PGTBL... */
666 if (HAS_PGTBL_EN)
667 intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
668
669 dev_info(&intel_private.bridge_dev->dev,
670 "detected gtt size: %dK total, %dK mappable\n",
671 intel_private.base.gtt_total_entries * 4,
672 intel_private.base.gtt_mappable_entries * 4);
673
674 gtt_map_size = intel_private.base.gtt_total_entries * 4;
675
676 intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
677 gtt_map_size);
678 if (!intel_private.gtt) {
679 intel_private.driver->cleanup();
680 iounmap(intel_private.registers);
681 return -ENOMEM;
682 }
683 intel_private.base.gtt = intel_private.gtt;
684
685 global_cache_flush(); /* FIXME: ? */
686
687 intel_private.base.stolen_size = intel_gtt_stolen_size();
688
689 intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
690
691 ret = intel_gtt_setup_scratch_page();
692 if (ret != 0) {
693 intel_gtt_cleanup();
694 return ret;
695 }
696
697 return 0;
698}
699
700static int intel_fake_agp_fetch_size(void)
701{
702 int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
703 unsigned int aper_size;
704 int i;
705
706 aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
707 / MB(1);
708
709 for (i = 0; i < num_sizes; i++) {
710 if (aper_size == intel_fake_agp_sizes[i].size) {
711 agp_bridge->current_size =
712 (void *) (intel_fake_agp_sizes + i);
713 return aper_size;
714 }
715 }
716
717 return 0;
718}
719
720static void i830_cleanup(void)
721{
722}
723
724/* The chipset_flush interface needs to get data that has already been
725 * flushed out of the CPU all the way out to main memory, because the GPU
726 * doesn't snoop those buffers.
727 *
728 * The 8xx series doesn't have the same lovely interface for flushing the
729 * chipset write buffers that the later chips do. According to the 865
730 * specs, the write buffer is 64 octwords, or 1KB. Rather than fill and
731 * clflush that buffer, the code below forces the CPU caches out with wbinvd
732 * and then pokes the flush bit in the HIC register until the hardware clears it.
733 */
734static void i830_chipset_flush(void)
735{
736 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
737
738 /* Forcibly evict everything from the CPU write buffers.
739 * clflush appears to be insufficient.
740 */
741 wbinvd_on_all_cpus();
742
743	/* So far we've only seen documentation for this magic bit on the 855GM;
744	 * we hope it exists on the other gen2 chipsets...
745 *
746 * Also works as advertised on my 845G.
747 */
748 writel(readl(intel_private.registers+I830_HIC) | (1<<31),
749 intel_private.registers+I830_HIC);
750
751 while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
752 if (time_after(jiffies, timeout))
753 break;
754
755 udelay(50);
756 }
757}
758
759static void i830_write_entry(dma_addr_t addr, unsigned int entry,
760 unsigned int flags)
761{
762 u32 pte_flags = I810_PTE_VALID;
763
764 if (flags == AGP_USER_CACHED_MEMORY)
765 pte_flags |= I830_PTE_SYSTEM_CACHED;
766
767 writel(addr | pte_flags, intel_private.gtt + entry);
768}
769
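/* (Re)enable GTT translation: latch the aperture bus address from the
 * GMADDR config register, set the GMCH enable bit on gen2 and restore the
 * saved PGETBL_CTL value, verifying that the enable bits actually stuck. */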
770static bool intel_enable_gtt(void)
771{
772 u32 gma_addr;
773 u8 __iomem *reg;
774
775 if (INTEL_GTT_GEN <= 2)
776 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
777 &gma_addr);
778 else
779 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
780 &gma_addr);
781
782 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
783
784 if (INTEL_GTT_GEN >= 6)
785 return true;
786
787 if (INTEL_GTT_GEN == 2) {
788 u16 gmch_ctrl;
789
790 pci_read_config_word(intel_private.bridge_dev,
791 I830_GMCH_CTRL, &gmch_ctrl);
792 gmch_ctrl |= I830_GMCH_ENABLED;
793 pci_write_config_word(intel_private.bridge_dev,
794 I830_GMCH_CTRL, gmch_ctrl);
795
796 pci_read_config_word(intel_private.bridge_dev,
797 I830_GMCH_CTRL, &gmch_ctrl);
798 if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
799 dev_err(&intel_private.pcidev->dev,
800 "failed to enable the GTT: GMCH_CTRL=%x\n",
801 gmch_ctrl);
802 return false;
803 }
804 }
805
806 /* On the resume path we may be adjusting the PGTBL value, so
807 * be paranoid and flush all chipset write buffers...
808 */
809 if (INTEL_GTT_GEN >= 3)
810 writel(0, intel_private.registers+GFX_FLSH_CNTL);
811
812 reg = intel_private.registers+I810_PGETBL_CTL;
813 writel(intel_private.PGETBL_save, reg);
814 if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
815 dev_err(&intel_private.pcidev->dev,
816 "failed to enable the GTT: PGETBL=%x [expected %x]\n",
817 readl(reg), intel_private.PGETBL_save);
818 return false;
819 }
820
821 if (INTEL_GTT_GEN >= 3)
822 writel(0, intel_private.registers+GFX_FLSH_CNTL);
823
824 return true;
825}
826
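/* gen2 setup: ioremap 64KB of MMIO registers from the MMADDR BAR; the GTT
 * entries live at offset I810_PTE_BASE within the same BAR. */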
827static int i830_setup(void)
828{
829 u32 reg_addr;
830
831	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
832 reg_addr &= 0xfff80000;
833
834 intel_private.registers = ioremap(reg_addr, KB(64));
835 if (!intel_private.registers)
836 return -ENOMEM;
837
838 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
839
840 return 0;
841}
842
843static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
844{
845 agp_bridge->gatt_table_real = NULL;
846 agp_bridge->gatt_table = NULL;
847 agp_bridge->gatt_bus_addr = 0;
848
849 return 0;
850}
851
852static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
853{
854 return 0;
855}
856
857static int intel_fake_agp_configure(void)
858{
859 if (!intel_enable_gtt())
860 return -EIO;
861
862 intel_private.clear_fake_agp = true;
863 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
864
865 return 0;
866}
867
868static bool i830_check_flags(unsigned int flags)
869{
870 switch (flags) {
871 case 0:
872 case AGP_PHYS_MEMORY:
873 case AGP_USER_CACHED_MEMORY:
874 case AGP_USER_MEMORY:
875 return true;
876 }
877
878 return false;
879}
880
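/* Write one GTT entry for each 4KiB page of a dma-mapped scatterlist,
 * starting at GTT index pg_start, then read back the last entry so the
 * PTE writes are posted before the GPU can use them.
 *
 * Minimal usage sketch (hypothetical caller with an already dma-mapped
 * sg_table 'st'; not taken from this file):
 *
 *	intel_gtt_insert_sg_entries(st->sgl, st->nents, first_pte,
 *				    AGP_USER_MEMORY);
 */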
881void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
882 unsigned int sg_len,
883 unsigned int pg_start,
884 unsigned int flags)
885{
886 struct scatterlist *sg;
887 unsigned int len, m;
888 int i, j;
889
890 j = pg_start;
891
892	/* sg may merge pages, but we have to write a separate
893	 * GTT entry for each page's dma address */
894 for_each_sg(sg_list, sg, sg_len, i) {
895 len = sg_dma_len(sg) >> PAGE_SHIFT;
896 for (m = 0; m < len; m++) {
897 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
898 intel_private.driver->write_entry(addr,
899 j, flags);
900 j++;
901 }
902 }
903 readl(intel_private.gtt+j-1);
904}
905EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
906
907void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
908 struct page **pages, unsigned int flags)
909{
910 int i, j;
911
912 for (i = 0, j = first_entry; i < num_entries; i++, j++) {
913 dma_addr_t addr = page_to_phys(pages[i]);
914 intel_private.driver->write_entry(addr,
915 j, flags);
916 }
917 readl(intel_private.gtt+j-1);
918}
919EXPORT_SYMBOL(intel_gtt_insert_pages);
920
921static int intel_fake_agp_insert_entries(struct agp_memory *mem,
922 off_t pg_start, int type)
923{
924 int ret = -EINVAL;
925
926 if (intel_private.base.do_idle_maps)
927 return -ENODEV;
928
929 if (intel_private.clear_fake_agp) {
930 int start = intel_private.base.stolen_size / PAGE_SIZE;
931 int end = intel_private.base.gtt_mappable_entries;
932 intel_gtt_clear_range(start, end - start);
933 intel_private.clear_fake_agp = false;
934 }
935
936 if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
937 return i810_insert_dcache_entries(mem, pg_start, type);
938
939 if (mem->page_count == 0)
940 goto out;
941
942 if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
943 goto out_err;
944
945 if (type != mem->type)
946 goto out_err;
947
948 if (!intel_private.driver->check_flags(type))
949 goto out_err;
950
951 if (!mem->is_flushed)
952 global_cache_flush();
953
954 if (intel_private.base.needs_dmar) {
955 ret = intel_gtt_map_memory(mem->pages, mem->page_count,
956 &mem->sg_list, &mem->num_sg);
957 if (ret != 0)
958 return ret;
959
960 intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
961 pg_start, type);
962 } else
963 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
964 type);
965
966out:
967 ret = 0;
968out_err:
969 mem->is_flushed = true;
970 return ret;
971}
972
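/* Point a range of GTT entries back at the scratch page so that stray GPU
 * accesses hit a harmless page rather than whatever was mapped before. */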
973void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
974{
975 unsigned int i;
976
977 for (i = first_entry; i < (first_entry + num_entries); i++) {
978 intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
979 i, 0);
980 }
981 readl(intel_private.gtt+i-1);
982}
983EXPORT_SYMBOL(intel_gtt_clear_range);
984
985static int intel_fake_agp_remove_entries(struct agp_memory *mem,
986 off_t pg_start, int type)
987{
988 if (mem->page_count == 0)
989 return 0;
990
991 if (intel_private.base.do_idle_maps)
992 return -ENODEV;
993
994 intel_gtt_clear_range(pg_start, mem->page_count);
995
996 if (intel_private.base.needs_dmar) {
997 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
998 mem->sg_list = NULL;
999 mem->num_sg = 0;
1000 }
1001
1002 return 0;
1003}
1004
1005static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
1006 int type)
1007{
1008 struct agp_memory *new;
1009
1010 if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
1011 if (pg_count != intel_private.num_dcache_entries)
1012 return NULL;
1013
1014 new = agp_create_memory(1);
1015 if (new == NULL)
1016 return NULL;
1017
1018 new->type = AGP_DCACHE_MEMORY;
1019 new->page_count = pg_count;
1020 new->num_scratch_pages = 0;
1021 agp_free_page_array(new);
1022 return new;
1023 }
1024 if (type == AGP_PHYS_MEMORY)
1025 return alloc_agpphysmem_i8xx(pg_count, type);
1026 /* always return NULL for other allocation types for now */
1027 return NULL;
1028}
1029
1030static int intel_alloc_chipset_flush_resource(void)
1031{
1032 int ret;
1033 ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1034 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1035 pcibios_align_resource, intel_private.bridge_dev);
1036
1037 return ret;
1038}
1039
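/* Locate (or allocate) the isolated flush page on i915-class chipsets: if
 * the BIOS has not programmed IFPADDR (bit 0 clear), allocate a page of
 * bus address space and enable it; otherwise reuse the BIOS address and
 * try to reserve it. */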
1040static void intel_i915_setup_chipset_flush(void)
1041{
1042 int ret;
1043 u32 temp;
1044
1045 pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1046 if (!(temp & 0x1)) {
1047 intel_alloc_chipset_flush_resource();
1048 intel_private.resource_valid = 1;
1049 pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1050 } else {
1051 temp &= ~1;
1052
1053 intel_private.resource_valid = 1;
1054 intel_private.ifp_resource.start = temp;
1055 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1056 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1057		/* some BIOSes reserve this area in a PnP resource, some don't */
1058 if (ret)
1059 intel_private.resource_valid = 0;
1060 }
1061}
1062
1063static void intel_i965_g33_setup_chipset_flush(void)
1064{
1065 u32 temp_hi, temp_lo;
1066 int ret;
1067
1068 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
1069 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1070
1071 if (!(temp_lo & 0x1)) {
1072
1073 intel_alloc_chipset_flush_resource();
1074
1075 intel_private.resource_valid = 1;
1076 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
1077 upper_32_bits(intel_private.ifp_resource.start));
1078 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1079 } else {
1080 u64 l64;
1081
1082 temp_lo &= ~0x1;
1083 l64 = ((u64)temp_hi << 32) | temp_lo;
1084
1085 intel_private.resource_valid = 1;
1086 intel_private.ifp_resource.start = l64;
1087 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1088 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1089		/* some BIOSes reserve this area in a PnP resource, some don't */
1090 if (ret)
1091 intel_private.resource_valid = 0;
1092 }
1093}
1094
1095static void intel_i9xx_setup_flush(void)
1096{
1097 /* return if already configured */
1098 if (intel_private.ifp_resource.start)
1099 return;
1100
1101 if (INTEL_GTT_GEN == 6)
1102 return;
1103
1104 /* setup a resource for this object */
1105 intel_private.ifp_resource.name = "Intel Flush Page";
1106 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1107
1108	/* Set up the chipset flush page (64-bit IFPADDR on i965/G33, 32-bit on i915) */
1109 if (IS_G33 || INTEL_GTT_GEN >= 4) {
1110 intel_i965_g33_setup_chipset_flush();
1111 } else {
1112 intel_i915_setup_chipset_flush();
1113 }
1114
1115 if (intel_private.ifp_resource.start)
1116 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1117 if (!intel_private.i9xx_flush_page)
1118 dev_err(&intel_private.pcidev->dev,
1119 "can't ioremap flush page - no chipset flushing\n");
1120}
1121
1122static void i9xx_cleanup(void)
1123{
1124 if (intel_private.i9xx_flush_page)
1125 iounmap(intel_private.i9xx_flush_page);
1126 if (intel_private.resource_valid)
1127 release_resource(&intel_private.ifp_resource);
1128 intel_private.ifp_resource.start = 0;
1129 intel_private.resource_valid = 0;
1130}
1131
1132static void i9xx_chipset_flush(void)
1133{
1134 if (intel_private.i9xx_flush_page)
1135 writel(1, intel_private.i9xx_flush_page);
1136}
1137
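/* gen4-style PTE: physical address bits 35:32 are folded into PTE bits
 * 7:4, which is what the (addr >> 28) & 0xf0 below accomplishes. */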
1138static void i965_write_entry(dma_addr_t addr,
1139 unsigned int entry,
1140 unsigned int flags)
1141{
1142 u32 pte_flags;
1143
1144 pte_flags = I810_PTE_VALID;
1145 if (flags == AGP_USER_CACHED_MEMORY)
1146 pte_flags |= I830_PTE_SYSTEM_CACHED;
1147
1148 /* Shift high bits down */
1149 addr |= (addr >> 28) & 0xf0;
1150 writel(addr | pte_flags, intel_private.gtt + entry);
1151}
1152
1153static bool gen6_check_flags(unsigned int flags)
1154{
1155 return true;
1156}
1157
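/* gen6 PTE: the cacheability bits are derived from the AGP memory type,
 * and physical address bits 39:32 are folded into PTE bits 11:4. For
 * example, addr = 0x123456000ULL gives (addr >> 28) & 0xff0 = 0x010,
 * i.e. address bit 32 lands in PTE bit 4. */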
1158static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1159 unsigned int flags)
1160{
1161 unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1162 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1163 u32 pte_flags;
1164
1165 if (type_mask == AGP_USER_MEMORY)
1166 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1167 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1168 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1169 if (gfdt)
1170 pte_flags |= GEN6_PTE_GFDT;
1171 } else { /* set 'normal'/'cached' to LLC by default */
1172 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1173 if (gfdt)
1174 pte_flags |= GEN6_PTE_GFDT;
1175 }
1176
1177 /* gen6 has bit11-4 for physical addr bit39-32 */
1178 addr |= (addr >> 28) & 0xff0;
1179 writel(addr | pte_flags, intel_private.gtt + entry);
1180}
1181
1182static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
1183 unsigned int flags)
1184{
1185 u32 pte_flags;
1186
1187 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1188
1189	/* same PTE layout as gen6: physical addr bits 39:32 live in bits 11:4 */
1190 addr |= (addr >> 28) & 0xff0;
1191 writel(addr | pte_flags, intel_private.gtt + entry);
1192
1193 writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
1194}
1195
1196static void gen6_cleanup(void)
1197{
1198}
1199
1200/* Certain Gen5 chipsets require idling the GPU before
1201 * unmapping anything from the GTT when VT-d is enabled.
1202 */
1203static inline int needs_idle_maps(void)
1204{
1205#ifdef CONFIG_INTEL_IOMMU
1206 const unsigned short gpu_devid = intel_private.pcidev->device;
1207
1208 /* Query intel_iommu to see if we need the workaround. Presumably that
1209 * was loaded first.
1210 */
1211 if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
1212 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
1213 intel_iommu_gfx_mapped)
1214 return 1;
1215#endif
1216 return 0;
1217}
1218
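/* gen3+ setup: ioremap the MMADDR MMIO window (2MB on gen7+, 512KB
 * otherwise). gen3 exposes the GTT through its own PTEADDR register,
 * while for the rest the GTT sits inside the MMIO window, 2MB in on
 * gen5/6 and 512KB in otherwise. */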
1219static int i9xx_setup(void)
1220{
1221 u32 reg_addr;
1222 int size = KB(512);
1223
1224	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
1225
1226 reg_addr &= 0xfff80000;
1227
1228 if (INTEL_GTT_GEN >= 7)
1229 size = MB(2);
1230
1231 intel_private.registers = ioremap(reg_addr, size);
1232 if (!intel_private.registers)
1233 return -ENOMEM;
1234
1235 if (INTEL_GTT_GEN == 3) {
1236 u32 gtt_addr;
1237
1238 pci_read_config_dword(intel_private.pcidev,
1239			      I915_PTEADDR, &gtt_addr);
1240 intel_private.gtt_bus_addr = gtt_addr;
1241 } else {
1242 u32 gtt_offset;
1243
1244 switch (INTEL_GTT_GEN) {
1245 case 5:
1246 case 6:
1247 gtt_offset = MB(2);
1248 break;
1249 case 4:
1250 default:
1251 gtt_offset = KB(512);
1252 break;
1253 }
1254 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1255 }
1256
1257 if (needs_idle_maps())
1258 intel_private.base.do_idle_maps = 1;
1259
1260 intel_i9xx_setup_flush();
1261
1262 return 0;
1263}
1264
1265static const struct agp_bridge_driver intel_fake_agp_driver = {
1266 .owner = THIS_MODULE,
1267 .size_type = FIXED_APER_SIZE,
1268 .aperture_sizes = intel_fake_agp_sizes,
1269 .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1270 .configure = intel_fake_agp_configure,
1271 .fetch_size = intel_fake_agp_fetch_size,
1272 .cleanup = intel_gtt_cleanup,
1273 .agp_enable = intel_fake_agp_enable,
1274 .cache_flush = global_cache_flush,
1275 .create_gatt_table = intel_fake_agp_create_gatt_table,
1276 .free_gatt_table = intel_fake_agp_free_gatt_table,
1277 .insert_memory = intel_fake_agp_insert_entries,
1278 .remove_memory = intel_fake_agp_remove_entries,
1279 .alloc_by_type = intel_fake_agp_alloc_by_type,
1280 .free_by_type = intel_i810_free_by_type,
1281 .agp_alloc_page = agp_generic_alloc_page,
1282 .agp_alloc_pages = agp_generic_alloc_pages,
1283 .agp_destroy_page = agp_generic_destroy_page,
1284 .agp_destroy_pages = agp_generic_destroy_pages,
1285};
1286
1287static const struct intel_gtt_driver i81x_gtt_driver = {
1288 .gen = 1,
1289 .has_pgtbl_enable = 1,
1290 .dma_mask_size = 32,
1291 .setup = i810_setup,
1292 .cleanup = i810_cleanup,
1293 .check_flags = i830_check_flags,
1294 .write_entry = i810_write_entry,
1295};
1296static const struct intel_gtt_driver i8xx_gtt_driver = {
1297 .gen = 2,
1298 .has_pgtbl_enable = 1,
1299 .setup = i830_setup,
1300 .cleanup = i830_cleanup,
1301 .write_entry = i830_write_entry,
1302 .dma_mask_size = 32,
1303 .check_flags = i830_check_flags,
1304 .chipset_flush = i830_chipset_flush,
1305};
1306static const struct intel_gtt_driver i915_gtt_driver = {
1307 .gen = 3,
1308 .has_pgtbl_enable = 1,
1309 .setup = i9xx_setup,
1310 .cleanup = i9xx_cleanup,
1311 /* i945 is the last gpu to need phys mem (for overlay and cursors). */
1312 .write_entry = i830_write_entry,
1313 .dma_mask_size = 32,
1314 .check_flags = i830_check_flags,
1315 .chipset_flush = i9xx_chipset_flush,
1316};
1317static const struct intel_gtt_driver g33_gtt_driver = {
1318 .gen = 3,
1319 .is_g33 = 1,
1320 .setup = i9xx_setup,
1321 .cleanup = i9xx_cleanup,
1322 .write_entry = i965_write_entry,
1323 .dma_mask_size = 36,
1324 .check_flags = i830_check_flags,
1325 .chipset_flush = i9xx_chipset_flush,
1326};
1327static const struct intel_gtt_driver pineview_gtt_driver = {
1328 .gen = 3,
1329 .is_pineview = 1, .is_g33 = 1,
1330 .setup = i9xx_setup,
1331 .cleanup = i9xx_cleanup,
1332 .write_entry = i965_write_entry,
1333 .dma_mask_size = 36,
1334 .check_flags = i830_check_flags,
1335 .chipset_flush = i9xx_chipset_flush,
1336};
1337static const struct intel_gtt_driver i965_gtt_driver = {
1338 .gen = 4,
1339 .has_pgtbl_enable = 1,
1340 .setup = i9xx_setup,
1341 .cleanup = i9xx_cleanup,
1342 .write_entry = i965_write_entry,
1343 .dma_mask_size = 36,
1344 .check_flags = i830_check_flags,
1345 .chipset_flush = i9xx_chipset_flush,
1346};
1347static const struct intel_gtt_driver g4x_gtt_driver = {
1348 .gen = 5,
1349 .setup = i9xx_setup,
1350 .cleanup = i9xx_cleanup,
1351 .write_entry = i965_write_entry,
1352 .dma_mask_size = 36,
1353 .check_flags = i830_check_flags,
1354 .chipset_flush = i9xx_chipset_flush,
1355};
1356static const struct intel_gtt_driver ironlake_gtt_driver = {
1357 .gen = 5,
1358 .is_ironlake = 1,
1359 .setup = i9xx_setup,
1360 .cleanup = i9xx_cleanup,
1361 .write_entry = i965_write_entry,
1362 .dma_mask_size = 36,
1363 .check_flags = i830_check_flags,
1364 .chipset_flush = i9xx_chipset_flush,
1365};
1366static const struct intel_gtt_driver sandybridge_gtt_driver = {
1367 .gen = 6,
1368 .setup = i9xx_setup,
1369 .cleanup = gen6_cleanup,
1370 .write_entry = gen6_write_entry,
1371 .dma_mask_size = 40,
1372 .check_flags = gen6_check_flags,
1373 .chipset_flush = i9xx_chipset_flush,
1374};
1375static const struct intel_gtt_driver valleyview_gtt_driver = {
1376 .gen = 7,
1377 .setup = i9xx_setup,
1378 .cleanup = gen6_cleanup,
1379 .write_entry = valleyview_write_entry,
1380 .dma_mask_size = 40,
1381 .check_flags = gen6_check_flags,
1382 .chipset_flush = i9xx_chipset_flush,
1383};
1384
1385/* Table describing the Intel GMCH chipsets and their GTT drivers. find_gmch()
1386 * matches the probed PCI device against gmch_chip_id to determine which
1387 * gtt_driver should be used.
1388 */
1389static const struct intel_gtt_driver_description {
1390 unsigned int gmch_chip_id;
1391 char *name;
1392 const struct intel_gtt_driver *gtt_driver;
1393} intel_gtt_chipsets[] = {
1394 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
1395 &i81x_gtt_driver},
1396 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
1397 &i81x_gtt_driver},
1398 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
1399 &i81x_gtt_driver},
1400 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
1401 &i81x_gtt_driver},
1402 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1403 &i8xx_gtt_driver},
1404 { PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
1405 &i8xx_gtt_driver},
1406 { PCI_DEVICE_ID_INTEL_82854_IG, "854",
1407 &i8xx_gtt_driver},
1408 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
1409 &i8xx_gtt_driver},
1410 { PCI_DEVICE_ID_INTEL_82865_IG, "865",
1411 &i8xx_gtt_driver},
1412 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1413 &i915_gtt_driver },
1414 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1415 &i915_gtt_driver },
1416 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1417 &i915_gtt_driver },
1418 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1419 &i915_gtt_driver },
1420 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1421 &i915_gtt_driver },
1422 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1423 &i915_gtt_driver },
1424 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1425 &i965_gtt_driver },
1426 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1427 &i965_gtt_driver },
1428 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1429 &i965_gtt_driver },
1430 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1431 &i965_gtt_driver },
1432 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1433 &i965_gtt_driver },
1434 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1435 &i965_gtt_driver },
1436 { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1437 &g33_gtt_driver },
1438 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1439 &g33_gtt_driver },
1440 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1441 &g33_gtt_driver },
1442 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1443 &pineview_gtt_driver },
1444 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1445 &pineview_gtt_driver },
1446 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1447 &g4x_gtt_driver },
1448 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1449 &g4x_gtt_driver },
1450 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1451 &g4x_gtt_driver },
1452 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1453 &g4x_gtt_driver },
1454 { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1455 &g4x_gtt_driver },
1456 { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1457 &g4x_gtt_driver },
1458 { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1459 &g4x_gtt_driver },
1460 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1461 "HD Graphics", &ironlake_gtt_driver },
1462 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1463 "HD Graphics", &ironlake_gtt_driver },
1464 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
1465 "Sandybridge", &sandybridge_gtt_driver },
1466 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
1467 "Sandybridge", &sandybridge_gtt_driver },
1468 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
1469 "Sandybridge", &sandybridge_gtt_driver },
1470 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
1471 "Sandybridge", &sandybridge_gtt_driver },
1472 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
1473 "Sandybridge", &sandybridge_gtt_driver },
1474 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
1475 "Sandybridge", &sandybridge_gtt_driver },
1476 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
1477 "Sandybridge", &sandybridge_gtt_driver },
1478 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
1479 "Ivybridge", &sandybridge_gtt_driver },
1480 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
1481 "Ivybridge", &sandybridge_gtt_driver },
1482 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
1483 "Ivybridge", &sandybridge_gtt_driver },
1484 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
1485 "Ivybridge", &sandybridge_gtt_driver },
1486 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
1487 "Ivybridge", &sandybridge_gtt_driver },
1488 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
1489 "Ivybridge", &sandybridge_gtt_driver },
1490 { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
1491 "ValleyView", &valleyview_gtt_driver },
1492 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
1493 "Haswell", &sandybridge_gtt_driver },
1494 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
1495 "Haswell", &sandybridge_gtt_driver },
1496 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
1497 "Haswell", &sandybridge_gtt_driver },
1498 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
1499 "Haswell", &sandybridge_gtt_driver },
1500 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
1501 "Haswell", &sandybridge_gtt_driver },
1502 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
1503 "Haswell", &sandybridge_gtt_driver },
1504 { PCI_DEVICE_ID_INTEL_HASWELL_SDV,
1505 "Haswell", &sandybridge_gtt_driver },
1506 { 0, NULL, NULL }
1507};
1508
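/* Look up the integrated graphics device by PCI id; if the first match is
 * not at function 0, look for another device with the same id. */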
1509static int find_gmch(u16 device)
1510{
1511 struct pci_dev *gmch_device;
1512
1513 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1514 if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1515 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1516 device, gmch_device);
1517 }
1518
1519 if (!gmch_device)
1520 return 0;
1521
1522 intel_private.pcidev = gmch_device;
1523 return 1;
1524}
1525
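/* Probe entry point: match the GMCH against intel_gtt_chipsets[], wire up
 * the fake AGP bridge, program the DMA mask and initialize the GTT.
 * Returns 1 on success, 0 if this device is not handled here. */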
1526int intel_gmch_probe(struct pci_dev *pdev,
1527 struct agp_bridge_data *bridge)
1528{
1529 int i, mask;
1530 intel_private.driver = NULL;
1531
1532 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1533 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1534 intel_private.driver =
1535 intel_gtt_chipsets[i].gtt_driver;
1536 break;
1537 }
1538 }
1539
1540 if (!intel_private.driver)
1541 return 0;
1542
1543 bridge->driver = &intel_fake_agp_driver;
1544 bridge->dev_private_data = &intel_private;
1545 bridge->dev = pdev;
1546
1547 intel_private.bridge_dev = pci_dev_get(pdev);
1548
1549 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1550
1551 mask = intel_private.driver->dma_mask_size;
1552 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1553 dev_err(&intel_private.pcidev->dev,
1554 "set gfx device dma mask %d-bit failed!\n", mask);
1555 else
1556 pci_set_consistent_dma_mask(intel_private.pcidev,
1557 DMA_BIT_MASK(mask));
1558
1559 /*if (bridge->driver == &intel_810_driver)
1560 return 1;*/
1561
1562 if (intel_gtt_init() != 0)
1563 return 0;
1564
1565 return 1;
1566}
1567EXPORT_SYMBOL(intel_gmch_probe);
1568
1569const struct intel_gtt *intel_gtt_get(void)
1570{
1571 return &intel_private.base;
1572}
1573EXPORT_SYMBOL(intel_gtt_get);
1574
1575void intel_gtt_chipset_flush(void)
1576{
1577 if (intel_private.driver->chipset_flush)
1578 intel_private.driver->chipset_flush();
1579}
1580EXPORT_SYMBOL(intel_gtt_chipset_flush);
1581
1582void intel_gmch_remove(struct pci_dev *pdev)
1583{
1584 if (intel_private.pcidev)
1585 pci_dev_put(intel_private.pcidev);
1586 if (intel_private.bridge_dev)
1587 pci_dev_put(intel_private.bridge_dev);
1588}
1589EXPORT_SYMBOL(intel_gmch_remove);
1590
1591MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
1592MODULE_LICENSE("GPL and additional rights");