1/*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * an agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphics devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
10 *
11 * With gem this does not make much sense anymore, just needlessly complicates
12 * the code. But as long as the old graphics stack is still supported, it's stuck
13 * here.
14 *
15 * /fairy-tale-mode off
16 */
17
18#include <linux/module.h>
19#include <linux/pci.h>
20#include <linux/kernel.h>
21#include <linux/pagemap.h>
22#include <linux/agp_backend.h>
23#include <linux/iommu.h>
24#include <linux/delay.h>
25#include <asm/smp.h>
26#include "agp.h"
27#include "intel-agp.h"
28#include <drm/intel-gtt.h>
29#include <asm/set_memory.h>
30
31/*
32 * If we have Intel graphics, we're not going to have anything other than
33 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
34 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
35 * Only newer chipsets need to bother with this, of course.
36 */
37#ifdef CONFIG_INTEL_IOMMU
38#define USE_PCI_DMA_API 1
39#else
40#define USE_PCI_DMA_API 0
41#endif
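
/*
 * Note that this only says the DMA API *may* be needed; intel_gtt_init()
 * combines it with the chipset generation (gen > 2) when it sets
 * intel_private.needs_dmar.
 */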
42
43struct intel_gtt_driver {
44 unsigned int gen : 8;
45 unsigned int is_g33 : 1;
46 unsigned int is_pineview : 1;
47 unsigned int is_ironlake : 1;
48 unsigned int has_pgtbl_enable : 1;
49 unsigned int dma_mask_size : 8;
50 /* Chipset specific GTT setup */
51 int (*setup)(void);
52 /* This should undo anything done in ->setup() save the unmapping
53 * of the mmio register file, that's done in the generic code. */
54 void (*cleanup)(void);
55 void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
56 /* Flags is a more or less chipset specific opaque value.
57 * For chipsets that need to support old ums (non-gem) code, this
58 * needs to be identical to the various supported agp memory types! */
59 bool (*check_flags)(unsigned int flags);
60 void (*chipset_flush)(void);
61};
62
63static struct _intel_private {
64 const struct intel_gtt_driver *driver;
65 struct pci_dev *pcidev; /* device one */
66 struct pci_dev *bridge_dev;
67 u8 __iomem *registers;
68 phys_addr_t gtt_phys_addr;
69 u32 PGETBL_save;
70 u32 __iomem *gtt; /* I915G */
71 bool clear_fake_agp; /* on first access via agp, fill with scratch */
72 int num_dcache_entries;
73 void __iomem *i9xx_flush_page;
74 char *i81x_gtt_table;
75 struct resource ifp_resource;
76 int resource_valid;
77 struct page *scratch_page;
78 phys_addr_t scratch_page_dma;
79 int refcount;
80 /* Whether i915 needs to use the dmar apis or not. */
81 unsigned int needs_dmar : 1;
82 phys_addr_t gma_bus_addr;
83 /* Size of memory reserved for graphics by the BIOS */
84 resource_size_t stolen_size;
85 /* Total number of gtt entries. */
86 unsigned int gtt_total_entries;
87 /* Part of the gtt that is mappable by the cpu, for those chips where
88 * this is not the full gtt. */
89 unsigned int gtt_mappable_entries;
90} intel_private;
91
92#define INTEL_GTT_GEN intel_private.driver->gen
93#define IS_G33 intel_private.driver->is_g33
94#define IS_PINEVIEW intel_private.driver->is_pineview
95#define IS_IRONLAKE intel_private.driver->is_ironlake
96#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
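/* These shortcuts dereference intel_private.driver, so they are only valid
 * once intel_gmch_probe() has matched a chipset and assigned a driver. */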
97
98#if IS_ENABLED(CONFIG_AGP_INTEL)
99static int intel_gtt_map_memory(struct page **pages,
100 unsigned int num_entries,
101 struct sg_table *st)
102{
103 struct scatterlist *sg;
104 int i;
105
106 DBG("try mapping %lu pages\n", (unsigned long)num_entries);
107
108 if (sg_alloc_table(st, num_entries, GFP_KERNEL))
109 goto err;
110
111 for_each_sg(st->sgl, sg, num_entries, i)
112 sg_set_page(sg, pages[i], PAGE_SIZE, 0);
113
114 if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents,
115 DMA_BIDIRECTIONAL))
116 goto err;
117
118 return 0;
119
120err:
121 sg_free_table(st);
122 return -ENOMEM;
123}
124
125static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
126{
127 struct sg_table st;
 128 DBG("try unmapping %lu pages\n", (unsigned long)num_sg);
129
130 dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg,
131 DMA_BIDIRECTIONAL);
132
133 st.sgl = sg_list;
134 st.orig_nents = st.nents = num_sg;
135
136 sg_free_table(&st);
137}
138
139static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
140{
141 return;
142}
143
144/* Exists to support ARGB cursors */
145static struct page *i8xx_alloc_pages(void)
146{
147 struct page *page;
148
149 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
150 if (page == NULL)
151 return NULL;
152
153 if (set_pages_uc(page, 4) < 0) {
154 set_pages_wb(page, 4);
155 __free_pages(page, 2);
156 return NULL;
157 }
158 atomic_inc(&agp_bridge->current_memory_agp);
159 return page;
160}
161
162static void i8xx_destroy_pages(struct page *page)
163{
164 if (page == NULL)
165 return;
166
167 set_pages_wb(page, 4);
168 __free_pages(page, 2);
169 atomic_dec(&agp_bridge->current_memory_agp);
170}
171#endif
172
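/* Order-4 allocation: 2^4 = 16 pages, i.e. the fixed 64KiB i81x GTT. */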
173#define I810_GTT_ORDER 4
174static int i810_setup(void)
175{
176 phys_addr_t reg_addr;
177 char *gtt_table;
178
179 /* i81x does not preallocate the gtt. It's always 64kb in size. */
180 gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
181 if (gtt_table == NULL)
182 return -ENOMEM;
183 intel_private.i81x_gtt_table = gtt_table;
184
185 reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
186
187 intel_private.registers = ioremap(reg_addr, KB(64));
188 if (!intel_private.registers)
189 return -ENOMEM;
190
191 writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
192 intel_private.registers+I810_PGETBL_CTL);
193
194 intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
195
196 if ((readl(intel_private.registers+I810_DRAM_CTL)
197 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
198 dev_info(&intel_private.pcidev->dev,
199 "detected 4MB dedicated video ram\n");
200 intel_private.num_dcache_entries = 1024;
201 }
202
203 return 0;
204}
205
206static void i810_cleanup(void)
207{
208 writel(0, intel_private.registers+I810_PGETBL_CTL);
209 free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
210}
211
212#if IS_ENABLED(CONFIG_AGP_INTEL)
213static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
214 int type)
215{
216 int i;
217
218 if ((pg_start + mem->page_count)
219 > intel_private.num_dcache_entries)
220 return -EINVAL;
221
222 if (!mem->is_flushed)
223 global_cache_flush();
224
225 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
226 dma_addr_t addr = i << PAGE_SHIFT;
227 intel_private.driver->write_entry(addr,
228 i, type);
229 }
230 wmb();
231
232 return 0;
233}
234
235/*
236 * The i810/i830 requires a physical address to program its mouse
237 * pointer into hardware.
238 * However the Xserver still writes to it through the agp aperture.
239 */
240static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
241{
242 struct agp_memory *new;
243 struct page *page;
244
245 switch (pg_count) {
246 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
247 break;
248 case 4:
249 /* kludge to get 4 physical pages for ARGB cursor */
250 page = i8xx_alloc_pages();
251 break;
252 default:
253 return NULL;
254 }
255
256 if (page == NULL)
257 return NULL;
258
259 new = agp_create_memory(pg_count);
260 if (new == NULL)
261 return NULL;
262
263 new->pages[0] = page;
264 if (pg_count == 4) {
265 /* kludge to get 4 physical pages for ARGB cursor */
266 new->pages[1] = new->pages[0] + 1;
267 new->pages[2] = new->pages[1] + 1;
268 new->pages[3] = new->pages[2] + 1;
269 }
270 new->page_count = pg_count;
271 new->num_scratch_pages = pg_count;
272 new->type = AGP_PHYS_MEMORY;
273 new->physical = page_to_phys(new->pages[0]);
274 return new;
275}
276
277static void intel_i810_free_by_type(struct agp_memory *curr)
278{
279 agp_free_key(curr->key);
280 if (curr->type == AGP_PHYS_MEMORY) {
281 if (curr->page_count == 4)
282 i8xx_destroy_pages(curr->pages[0]);
283 else {
284 agp_bridge->driver->agp_destroy_page(curr->pages[0],
285 AGP_PAGE_DESTROY_UNMAP);
286 agp_bridge->driver->agp_destroy_page(curr->pages[0],
287 AGP_PAGE_DESTROY_FREE);
288 }
289 agp_free_page_array(curr);
290 }
291 kfree(curr);
292}
293#endif
294
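/*
 * The scratch page backs every GTT entry that has nothing bound to it (see
 * intel_gmch_gtt_clear_range()), so stray GPU accesses hit a harmless,
 * uncached page instead of arbitrary memory.
 */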
295static int intel_gtt_setup_scratch_page(void)
296{
297 struct page *page;
298 dma_addr_t dma_addr;
299
300 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
301 if (page == NULL)
302 return -ENOMEM;
303 set_pages_uc(page, 1);
304
305 if (intel_private.needs_dmar) {
306 dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0,
307 PAGE_SIZE, DMA_BIDIRECTIONAL);
308 if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) {
309 __free_page(page);
310 return -EINVAL;
311 }
312
313 intel_private.scratch_page_dma = dma_addr;
314 } else
315 intel_private.scratch_page_dma = page_to_phys(page);
316
317 intel_private.scratch_page = page;
318
319 return 0;
320}
321
322static void i810_write_entry(dma_addr_t addr, unsigned int entry,
323 unsigned int flags)
324{
325 u32 pte_flags = I810_PTE_VALID;
326
327 switch (flags) {
328 case AGP_DCACHE_MEMORY:
329 pte_flags |= I810_PTE_LOCAL;
330 break;
331 case AGP_USER_CACHED_MEMORY:
332 pte_flags |= I830_PTE_SYSTEM_CACHED;
333 break;
334 }
335
336 writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
337}
338
339static resource_size_t intel_gtt_stolen_size(void)
340{
341 u16 gmch_ctrl;
342 u8 rdct;
343 int local = 0;
344 static const int ddt[4] = { 0, 16, 32, 64 };
345 resource_size_t stolen_size = 0;
346
347 if (INTEL_GTT_GEN == 1)
348 return 0; /* no stolen mem on i81x */
349
350 pci_read_config_word(intel_private.bridge_dev,
351 I830_GMCH_CTRL, &gmch_ctrl);
352
353 if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
354 intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
355 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
356 case I830_GMCH_GMS_STOLEN_512:
357 stolen_size = KB(512);
358 break;
359 case I830_GMCH_GMS_STOLEN_1024:
360 stolen_size = MB(1);
361 break;
362 case I830_GMCH_GMS_STOLEN_8192:
363 stolen_size = MB(8);
364 break;
365 case I830_GMCH_GMS_LOCAL:
366 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
367 stolen_size = (I830_RDRAM_ND(rdct) + 1) *
368 MB(ddt[I830_RDRAM_DDT(rdct)]);
369 local = 1;
370 break;
371 default:
372 stolen_size = 0;
373 break;
374 }
375 } else {
376 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
377 case I855_GMCH_GMS_STOLEN_1M:
378 stolen_size = MB(1);
379 break;
380 case I855_GMCH_GMS_STOLEN_4M:
381 stolen_size = MB(4);
382 break;
383 case I855_GMCH_GMS_STOLEN_8M:
384 stolen_size = MB(8);
385 break;
386 case I855_GMCH_GMS_STOLEN_16M:
387 stolen_size = MB(16);
388 break;
389 case I855_GMCH_GMS_STOLEN_32M:
390 stolen_size = MB(32);
391 break;
392 case I915_GMCH_GMS_STOLEN_48M:
393 stolen_size = MB(48);
394 break;
395 case I915_GMCH_GMS_STOLEN_64M:
396 stolen_size = MB(64);
397 break;
398 case G33_GMCH_GMS_STOLEN_128M:
399 stolen_size = MB(128);
400 break;
401 case G33_GMCH_GMS_STOLEN_256M:
402 stolen_size = MB(256);
403 break;
404 case INTEL_GMCH_GMS_STOLEN_96M:
405 stolen_size = MB(96);
406 break;
407 case INTEL_GMCH_GMS_STOLEN_160M:
408 stolen_size = MB(160);
409 break;
410 case INTEL_GMCH_GMS_STOLEN_224M:
411 stolen_size = MB(224);
412 break;
413 case INTEL_GMCH_GMS_STOLEN_352M:
414 stolen_size = MB(352);
415 break;
416 default:
417 stolen_size = 0;
418 break;
419 }
420 }
421
422 if (stolen_size > 0) {
423 dev_info(&intel_private.bridge_dev->dev, "detected %lluK %s memory\n",
424 (u64)stolen_size / KB(1), local ? "local" : "stolen");
425 } else {
426 dev_info(&intel_private.bridge_dev->dev,
427 "no pre-allocated video memory detected\n");
428 stolen_size = 0;
429 }
430
431 return stolen_size;
432}
433
434static void i965_adjust_pgetbl_size(unsigned int size_flag)
435{
436 u32 pgetbl_ctl, pgetbl_ctl2;
437
438 /* ensure that ppgtt is disabled */
439 pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
440 pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
441 writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
442
443 /* write the new ggtt size */
444 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
445 pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
446 pgetbl_ctl |= size_flag;
447 writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
448}
449
450static unsigned int i965_gtt_total_entries(void)
451{
452 int size;
453 u32 pgetbl_ctl;
454 u16 gmch_ctl;
455
456 pci_read_config_word(intel_private.bridge_dev,
457 I830_GMCH_CTRL, &gmch_ctl);
458
459 if (INTEL_GTT_GEN == 5) {
460 switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
461 case G4x_GMCH_SIZE_1M:
462 case G4x_GMCH_SIZE_VT_1M:
463 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
464 break;
465 case G4x_GMCH_SIZE_VT_1_5M:
466 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
467 break;
468 case G4x_GMCH_SIZE_2M:
469 case G4x_GMCH_SIZE_VT_2M:
470 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
471 break;
472 }
473 }
474
475 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
476
477 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
478 case I965_PGETBL_SIZE_128KB:
479 size = KB(128);
480 break;
481 case I965_PGETBL_SIZE_256KB:
482 size = KB(256);
483 break;
484 case I965_PGETBL_SIZE_512KB:
485 size = KB(512);
486 break;
487 /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
488 case I965_PGETBL_SIZE_1MB:
489 size = KB(1024);
490 break;
491 case I965_PGETBL_SIZE_2MB:
492 size = KB(2048);
493 break;
494 case I965_PGETBL_SIZE_1_5MB:
495 size = KB(1024 + 512);
496 break;
497 default:
498 dev_info(&intel_private.pcidev->dev,
499 "unknown page table size, assuming 512KB\n");
500 size = KB(512);
501 }
502
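	/* Each GTT entry is a 32-bit PTE, so bytes / 4 gives the entry count. */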
503 return size/4;
504}
505
506static unsigned int intel_gtt_total_entries(void)
507{
508 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
509 return i965_gtt_total_entries();
510 else {
511 /* On previous hardware, the GTT size was just what was
512 * required to map the aperture.
513 */
514 return intel_private.gtt_mappable_entries;
515 }
516}
517
518static unsigned int intel_gtt_mappable_entries(void)
519{
520 unsigned int aperture_size;
521
522 if (INTEL_GTT_GEN == 1) {
523 u32 smram_miscc;
524
525 pci_read_config_dword(intel_private.bridge_dev,
526 I810_SMRAM_MISCC, &smram_miscc);
527
528 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
529 == I810_GFX_MEM_WIN_32M)
530 aperture_size = MB(32);
531 else
532 aperture_size = MB(64);
533 } else if (INTEL_GTT_GEN == 2) {
534 u16 gmch_ctrl;
535
536 pci_read_config_word(intel_private.bridge_dev,
537 I830_GMCH_CTRL, &gmch_ctrl);
538
539 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
540 aperture_size = MB(64);
541 else
542 aperture_size = MB(128);
543 } else {
544 /* 9xx supports large sizes, just look at the length */
545 aperture_size = pci_resource_len(intel_private.pcidev, 2);
546 }
547
548 return aperture_size >> PAGE_SHIFT;
549}
550
551static void intel_gtt_teardown_scratch_page(void)
552{
553 set_pages_wb(intel_private.scratch_page, 1);
554 if (intel_private.needs_dmar)
555 dma_unmap_page(&intel_private.pcidev->dev,
556 intel_private.scratch_page_dma, PAGE_SIZE,
557 DMA_BIDIRECTIONAL);
558 __free_page(intel_private.scratch_page);
559}
560
561static void intel_gtt_cleanup(void)
562{
563 intel_private.driver->cleanup();
564
565 iounmap(intel_private.gtt);
566 iounmap(intel_private.registers);
567
568 intel_gtt_teardown_scratch_page();
569}
570
571/* Certain Gen5 chipsets require idling the GPU before
572 * unmapping anything from the GTT when VT-d is enabled.
573 */
574static inline int needs_ilk_vtd_wa(void)
575{
576 const unsigned short gpu_devid = intel_private.pcidev->device;
577
578 /*
579 * Query iommu subsystem to see if we need the workaround. Presumably
580 * that was loaded first.
581 */
582 return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
583 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
584 device_iommu_mapped(&intel_private.pcidev->dev));
585}
586
587static bool intel_gtt_can_wc(void)
588{
589 if (INTEL_GTT_GEN <= 2)
590 return false;
591
592 if (INTEL_GTT_GEN >= 6)
593 return false;
594
595 /* Reports of major corruption with ILK vt'd enabled */
596 if (needs_ilk_vtd_wa())
597 return false;
598
599 return true;
600}
601
602static int intel_gtt_init(void)
603{
604 u32 gtt_map_size;
605 int ret, bar;
606
607 ret = intel_private.driver->setup();
608 if (ret != 0)
609 return ret;
610
611 intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
612 intel_private.gtt_total_entries = intel_gtt_total_entries();
613
614 /* save the PGETBL reg for resume */
615 intel_private.PGETBL_save =
616 readl(intel_private.registers+I810_PGETBL_CTL)
617 & ~I810_PGETBL_ENABLED;
618 /* we only ever restore the register when enabling the PGTBL... */
619 if (HAS_PGTBL_EN)
620 intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
621
622 dev_info(&intel_private.bridge_dev->dev,
623 "detected gtt size: %dK total, %dK mappable\n",
624 intel_private.gtt_total_entries * 4,
625 intel_private.gtt_mappable_entries * 4);
626
627 gtt_map_size = intel_private.gtt_total_entries * 4;
628
629 intel_private.gtt = NULL;
630 if (intel_gtt_can_wc())
631 intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
632 gtt_map_size);
633 if (intel_private.gtt == NULL)
634 intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
635 gtt_map_size);
636 if (intel_private.gtt == NULL) {
637 intel_private.driver->cleanup();
638 iounmap(intel_private.registers);
639 return -ENOMEM;
640 }
641
642#if IS_ENABLED(CONFIG_AGP_INTEL)
643 global_cache_flush(); /* FIXME: ? */
644#endif
645
646 intel_private.stolen_size = intel_gtt_stolen_size();
647
648 intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
649
650 ret = intel_gtt_setup_scratch_page();
651 if (ret != 0) {
652 intel_gtt_cleanup();
653 return ret;
654 }
655
656 if (INTEL_GTT_GEN <= 2)
657 bar = I810_GMADR_BAR;
658 else
659 bar = I915_GMADR_BAR;
660
661 intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
662 return 0;
663}
664
665#if IS_ENABLED(CONFIG_AGP_INTEL)
666static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
667 {32, 8192, 3},
668 {64, 16384, 4},
669 {128, 32768, 5},
670 {256, 65536, 6},
671 {512, 131072, 7},
672};
673
674static int intel_fake_agp_fetch_size(void)
675{
676 int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
677 unsigned int aper_size;
678 int i;
679
680 aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
681
682 for (i = 0; i < num_sizes; i++) {
683 if (aper_size == intel_fake_agp_sizes[i].size) {
684 agp_bridge->current_size =
685 (void *) (intel_fake_agp_sizes + i);
686 return aper_size;
687 }
688 }
689
690 return 0;
691}
692#endif
693
694static void i830_cleanup(void)
695{
696}
697
698/* The chipset_flush interface needs to get data that has already been
699 * flushed out of the CPU all the way out to main memory, because the GPU
700 * doesn't snoop those buffers.
701 *
702 * The 8xx series doesn't have the same lovely interface for flushing the
703 * chipset write buffers that the later chips do. According to the 865
704 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
705 * that buffer out, we just fill 1KB and clflush it out, on the assumption
706 * that it'll push whatever was in there out. It appears to work.
707 */
708static void i830_chipset_flush(void)
709{
710 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
711
712 /* Forcibly evict everything from the CPU write buffers.
713 * clflush appears to be insufficient.
714 */
715 wbinvd_on_all_cpus();
716
 717 /* So far we've only seen documentation for this magic bit on 855GM;
 718 * we hope it exists on the other gen2 chipsets...
719 *
720 * Also works as advertised on my 845G.
721 */
722 writel(readl(intel_private.registers+I830_HIC) | (1<<31),
723 intel_private.registers+I830_HIC);
724
725 while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
726 if (time_after(jiffies, timeout))
727 break;
728
729 udelay(50);
730 }
731}
732
733static void i830_write_entry(dma_addr_t addr, unsigned int entry,
734 unsigned int flags)
735{
736 u32 pte_flags = I810_PTE_VALID;
737
738 if (flags == AGP_USER_CACHED_MEMORY)
739 pte_flags |= I830_PTE_SYSTEM_CACHED;
740
741 writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
742}
743
744bool intel_gmch_enable_gtt(void)
745{
746 u8 __iomem *reg;
747
748 if (INTEL_GTT_GEN == 2) {
749 u16 gmch_ctrl;
750
751 pci_read_config_word(intel_private.bridge_dev,
752 I830_GMCH_CTRL, &gmch_ctrl);
753 gmch_ctrl |= I830_GMCH_ENABLED;
754 pci_write_config_word(intel_private.bridge_dev,
755 I830_GMCH_CTRL, gmch_ctrl);
756
757 pci_read_config_word(intel_private.bridge_dev,
758 I830_GMCH_CTRL, &gmch_ctrl);
759 if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
760 dev_err(&intel_private.pcidev->dev,
761 "failed to enable the GTT: GMCH_CTRL=%x\n",
762 gmch_ctrl);
763 return false;
764 }
765 }
766
767 /* On the resume path we may be adjusting the PGTBL value, so
768 * be paranoid and flush all chipset write buffers...
769 */
770 if (INTEL_GTT_GEN >= 3)
771 writel(0, intel_private.registers+GFX_FLSH_CNTL);
772
773 reg = intel_private.registers+I810_PGETBL_CTL;
774 writel(intel_private.PGETBL_save, reg);
775 if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
776 dev_err(&intel_private.pcidev->dev,
777 "failed to enable the GTT: PGETBL=%x [expected %x]\n",
778 readl(reg), intel_private.PGETBL_save);
779 return false;
780 }
781
782 if (INTEL_GTT_GEN >= 3)
783 writel(0, intel_private.registers+GFX_FLSH_CNTL);
784
785 return true;
786}
787EXPORT_SYMBOL(intel_gmch_enable_gtt);
788
789static int i830_setup(void)
790{
791 phys_addr_t reg_addr;
792
793 reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
794
795 intel_private.registers = ioremap(reg_addr, KB(64));
796 if (!intel_private.registers)
797 return -ENOMEM;
798
799 intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
800
801 return 0;
802}
803
804#if IS_ENABLED(CONFIG_AGP_INTEL)
805static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
806{
807 agp_bridge->gatt_table_real = NULL;
808 agp_bridge->gatt_table = NULL;
809 agp_bridge->gatt_bus_addr = 0;
810
811 return 0;
812}
813
814static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
815{
816 return 0;
817}
818
819static int intel_fake_agp_configure(void)
820{
821 if (!intel_gmch_enable_gtt())
822 return -EIO;
823
824 intel_private.clear_fake_agp = true;
825 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
826
827 return 0;
828}
829#endif
830
831static bool i830_check_flags(unsigned int flags)
832{
833 switch (flags) {
834 case 0:
835 case AGP_PHYS_MEMORY:
836 case AGP_USER_CACHED_MEMORY:
837 case AGP_USER_MEMORY:
838 return true;
839 }
840
841 return false;
842}
843
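/*
 * Write a single PTE and read it back: the readl() flushes the posted MMIO
 * write so the new mapping is visible to the GPU before this returns (and
 * before the optional chipset flush).
 */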
844void intel_gmch_gtt_insert_page(dma_addr_t addr,
845 unsigned int pg,
846 unsigned int flags)
847{
848 intel_private.driver->write_entry(addr, pg, flags);
849 readl(intel_private.gtt + pg);
850 if (intel_private.driver->chipset_flush)
851 intel_private.driver->chipset_flush();
852}
853EXPORT_SYMBOL(intel_gmch_gtt_insert_page);
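
/*
 * Illustrative use only -- a hypothetical caller binding an object's pages
 * one at a time (obj_pages, obj_page_count and first_pg are made-up names;
 * only intel_gmch_gtt_insert_page() and AGP_USER_MEMORY are real):
 *
 *	for (i = 0; i < obj_page_count; i++)
 *		intel_gmch_gtt_insert_page(page_to_phys(obj_pages[i]),
 *					   first_pg + i, AGP_USER_MEMORY);
 */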
854
855void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
856 unsigned int pg_start,
857 unsigned int flags)
858{
859 struct scatterlist *sg;
860 unsigned int len, m;
861 int i, j;
862
863 j = pg_start;
864
 865 /* sg may merge pages, but we have to write a separate
 866 * per-page address into the GTT */
867 for_each_sg(st->sgl, sg, st->nents, i) {
868 len = sg_dma_len(sg) >> PAGE_SHIFT;
869 for (m = 0; m < len; m++) {
870 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
871 intel_private.driver->write_entry(addr, j, flags);
872 j++;
873 }
874 }
875 readl(intel_private.gtt + j - 1);
876 if (intel_private.driver->chipset_flush)
877 intel_private.driver->chipset_flush();
878}
879EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
880
881#if IS_ENABLED(CONFIG_AGP_INTEL)
882static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
883 unsigned int num_entries,
884 struct page **pages,
885 unsigned int flags)
886{
887 int i, j;
888
889 for (i = 0, j = first_entry; i < num_entries; i++, j++) {
890 dma_addr_t addr = page_to_phys(pages[i]);
891 intel_private.driver->write_entry(addr,
892 j, flags);
893 }
894 wmb();
895}
896
897static int intel_fake_agp_insert_entries(struct agp_memory *mem,
898 off_t pg_start, int type)
899{
900 int ret = -EINVAL;
901
902 if (intel_private.clear_fake_agp) {
903 int start = intel_private.stolen_size / PAGE_SIZE;
904 int end = intel_private.gtt_mappable_entries;
905 intel_gmch_gtt_clear_range(start, end - start);
906 intel_private.clear_fake_agp = false;
907 }
908
909 if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
910 return i810_insert_dcache_entries(mem, pg_start, type);
911
912 if (mem->page_count == 0)
913 goto out;
914
915 if (pg_start + mem->page_count > intel_private.gtt_total_entries)
916 goto out_err;
917
918 if (type != mem->type)
919 goto out_err;
920
921 if (!intel_private.driver->check_flags(type))
922 goto out_err;
923
924 if (!mem->is_flushed)
925 global_cache_flush();
926
927 if (intel_private.needs_dmar) {
928 struct sg_table st;
929
930 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
931 if (ret != 0)
932 return ret;
933
934 intel_gmch_gtt_insert_sg_entries(&st, pg_start, type);
935 mem->sg_list = st.sgl;
936 mem->num_sg = st.nents;
937 } else
938 intel_gmch_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
939 type);
940
941out:
942 ret = 0;
943out_err:
944 mem->is_flushed = true;
945 return ret;
946}
947#endif
948
949void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
950{
951 unsigned int i;
952
953 for (i = first_entry; i < (first_entry + num_entries); i++) {
954 intel_private.driver->write_entry(intel_private.scratch_page_dma,
955 i, 0);
956 }
957 wmb();
958}
959EXPORT_SYMBOL(intel_gmch_gtt_clear_range);
960
961#if IS_ENABLED(CONFIG_AGP_INTEL)
962static int intel_fake_agp_remove_entries(struct agp_memory *mem,
963 off_t pg_start, int type)
964{
965 if (mem->page_count == 0)
966 return 0;
967
968 intel_gmch_gtt_clear_range(pg_start, mem->page_count);
969
970 if (intel_private.needs_dmar) {
971 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
972 mem->sg_list = NULL;
973 mem->num_sg = 0;
974 }
975
976 return 0;
977}
978
979static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
980 int type)
981{
982 struct agp_memory *new;
983
984 if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
985 if (pg_count != intel_private.num_dcache_entries)
986 return NULL;
987
988 new = agp_create_memory(1);
989 if (new == NULL)
990 return NULL;
991
992 new->type = AGP_DCACHE_MEMORY;
993 new->page_count = pg_count;
994 new->num_scratch_pages = 0;
995 agp_free_page_array(new);
996 return new;
997 }
998 if (type == AGP_PHYS_MEMORY)
999 return alloc_agpphysmem_i8xx(pg_count, type);
1000 /* always return NULL for other allocation types for now */
1001 return NULL;
1002}
1003#endif
1004
1005static int intel_alloc_chipset_flush_resource(void)
1006{
1007 int ret;
1008 ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1009 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1010 pcibios_align_resource, intel_private.bridge_dev);
1011
1012 return ret;
1013}
1014
1015static void intel_i915_setup_chipset_flush(void)
1016{
1017 int ret;
1018 u32 temp;
1019
1020 pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1021 if (!(temp & 0x1)) {
1022 intel_alloc_chipset_flush_resource();
1023 intel_private.resource_valid = 1;
1024 pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1025 } else {
1026 temp &= ~1;
1027
1028 intel_private.resource_valid = 1;
1029 intel_private.ifp_resource.start = temp;
1030 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1031 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
 1032 /* some BIOSes reserve this area in a PnP resource, some don't */
1033 if (ret)
1034 intel_private.resource_valid = 0;
1035 }
1036}
1037
1038static void intel_i965_g33_setup_chipset_flush(void)
1039{
1040 u32 temp_hi, temp_lo;
1041 int ret;
1042
1043 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
1044 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1045
1046 if (!(temp_lo & 0x1)) {
1047
1048 intel_alloc_chipset_flush_resource();
1049
1050 intel_private.resource_valid = 1;
1051 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
1052 upper_32_bits(intel_private.ifp_resource.start));
1053 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1054 } else {
1055 u64 l64;
1056
1057 temp_lo &= ~0x1;
1058 l64 = ((u64)temp_hi << 32) | temp_lo;
1059
1060 intel_private.resource_valid = 1;
1061 intel_private.ifp_resource.start = l64;
1062 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1063 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
 1064 /* some BIOSes reserve this area in a PnP resource, some don't */
1065 if (ret)
1066 intel_private.resource_valid = 0;
1067 }
1068}
1069
1070static void intel_i9xx_setup_flush(void)
1071{
1072 /* return if already configured */
1073 if (intel_private.ifp_resource.start)
1074 return;
1075
1076 if (INTEL_GTT_GEN == 6)
1077 return;
1078
1079 /* setup a resource for this object */
1080 intel_private.ifp_resource.name = "Intel Flush Page";
1081 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1082
1083 /* Setup chipset flush for 915 */
1084 if (IS_G33 || INTEL_GTT_GEN >= 4) {
1085 intel_i965_g33_setup_chipset_flush();
1086 } else {
1087 intel_i915_setup_chipset_flush();
1088 }
1089
1090 if (intel_private.ifp_resource.start)
1091 intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
1092 if (!intel_private.i9xx_flush_page)
1093 dev_err(&intel_private.pcidev->dev,
1094 "can't ioremap flush page - no chipset flushing\n");
1095}
1096
1097static void i9xx_cleanup(void)
1098{
1099 if (intel_private.i9xx_flush_page)
1100 iounmap(intel_private.i9xx_flush_page);
1101 if (intel_private.resource_valid)
1102 release_resource(&intel_private.ifp_resource);
1103 intel_private.ifp_resource.start = 0;
1104 intel_private.resource_valid = 0;
1105}
1106
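/*
 * A dummy write to the flush page mapped in intel_i9xx_setup_flush() is what
 * asks the GMCH to flush its write buffers; the wmb() orders our own PTE
 * writes ahead of it.
 */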
1107static void i9xx_chipset_flush(void)
1108{
1109 wmb();
1110 if (intel_private.i9xx_flush_page)
1111 writel(1, intel_private.i9xx_flush_page);
1112}
1113
1114static void i965_write_entry(dma_addr_t addr,
1115 unsigned int entry,
1116 unsigned int flags)
1117{
1118 u32 pte_flags;
1119
1120 pte_flags = I810_PTE_VALID;
1121 if (flags == AGP_USER_CACHED_MEMORY)
1122 pte_flags |= I830_PTE_SYSTEM_CACHED;
1123
 1124 /* Fold address bits [35:32] into PTE bits [7:4] (high dma address bits) */
1125 addr |= (addr >> 28) & 0xf0;
1126 writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
1127}
1128
1129static int i9xx_setup(void)
1130{
1131 phys_addr_t reg_addr;
1132 int size = KB(512);
1133
1134 reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);
1135
1136 intel_private.registers = ioremap(reg_addr, size);
1137 if (!intel_private.registers)
1138 return -ENOMEM;
1139
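	/*
	 * Where the GTT itself lives depends on the generation: gen3 exposes
	 * it in its own PCI BAR, gen5 puts it 2MiB into the MMIO BAR, and
	 * everything else handled here (gen4) at the 512KiB mark.
	 */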
1140 switch (INTEL_GTT_GEN) {
1141 case 3:
1142 intel_private.gtt_phys_addr =
1143 pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
1144 break;
1145 case 5:
1146 intel_private.gtt_phys_addr = reg_addr + MB(2);
1147 break;
1148 default:
1149 intel_private.gtt_phys_addr = reg_addr + KB(512);
1150 break;
1151 }
1152
1153 intel_i9xx_setup_flush();
1154
1155 return 0;
1156}
1157
1158#if IS_ENABLED(CONFIG_AGP_INTEL)
1159static const struct agp_bridge_driver intel_fake_agp_driver = {
1160 .owner = THIS_MODULE,
1161 .size_type = FIXED_APER_SIZE,
1162 .aperture_sizes = intel_fake_agp_sizes,
1163 .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1164 .configure = intel_fake_agp_configure,
1165 .fetch_size = intel_fake_agp_fetch_size,
1166 .cleanup = intel_gtt_cleanup,
1167 .agp_enable = intel_fake_agp_enable,
1168 .cache_flush = global_cache_flush,
1169 .create_gatt_table = intel_fake_agp_create_gatt_table,
1170 .free_gatt_table = intel_fake_agp_free_gatt_table,
1171 .insert_memory = intel_fake_agp_insert_entries,
1172 .remove_memory = intel_fake_agp_remove_entries,
1173 .alloc_by_type = intel_fake_agp_alloc_by_type,
1174 .free_by_type = intel_i810_free_by_type,
1175 .agp_alloc_page = agp_generic_alloc_page,
1176 .agp_alloc_pages = agp_generic_alloc_pages,
1177 .agp_destroy_page = agp_generic_destroy_page,
1178 .agp_destroy_pages = agp_generic_destroy_pages,
1179};
1180#endif
1181
1182static const struct intel_gtt_driver i81x_gtt_driver = {
1183 .gen = 1,
1184 .has_pgtbl_enable = 1,
1185 .dma_mask_size = 32,
1186 .setup = i810_setup,
1187 .cleanup = i810_cleanup,
1188 .check_flags = i830_check_flags,
1189 .write_entry = i810_write_entry,
1190};
1191static const struct intel_gtt_driver i8xx_gtt_driver = {
1192 .gen = 2,
1193 .has_pgtbl_enable = 1,
1194 .setup = i830_setup,
1195 .cleanup = i830_cleanup,
1196 .write_entry = i830_write_entry,
1197 .dma_mask_size = 32,
1198 .check_flags = i830_check_flags,
1199 .chipset_flush = i830_chipset_flush,
1200};
1201static const struct intel_gtt_driver i915_gtt_driver = {
1202 .gen = 3,
1203 .has_pgtbl_enable = 1,
1204 .setup = i9xx_setup,
1205 .cleanup = i9xx_cleanup,
1206 /* i945 is the last gpu to need phys mem (for overlay and cursors). */
1207 .write_entry = i830_write_entry,
1208 .dma_mask_size = 32,
1209 .check_flags = i830_check_flags,
1210 .chipset_flush = i9xx_chipset_flush,
1211};
1212static const struct intel_gtt_driver g33_gtt_driver = {
1213 .gen = 3,
1214 .is_g33 = 1,
1215 .setup = i9xx_setup,
1216 .cleanup = i9xx_cleanup,
1217 .write_entry = i965_write_entry,
1218 .dma_mask_size = 36,
1219 .check_flags = i830_check_flags,
1220 .chipset_flush = i9xx_chipset_flush,
1221};
1222static const struct intel_gtt_driver pineview_gtt_driver = {
1223 .gen = 3,
1224 .is_pineview = 1, .is_g33 = 1,
1225 .setup = i9xx_setup,
1226 .cleanup = i9xx_cleanup,
1227 .write_entry = i965_write_entry,
1228 .dma_mask_size = 36,
1229 .check_flags = i830_check_flags,
1230 .chipset_flush = i9xx_chipset_flush,
1231};
1232static const struct intel_gtt_driver i965_gtt_driver = {
1233 .gen = 4,
1234 .has_pgtbl_enable = 1,
1235 .setup = i9xx_setup,
1236 .cleanup = i9xx_cleanup,
1237 .write_entry = i965_write_entry,
1238 .dma_mask_size = 36,
1239 .check_flags = i830_check_flags,
1240 .chipset_flush = i9xx_chipset_flush,
1241};
1242static const struct intel_gtt_driver g4x_gtt_driver = {
1243 .gen = 5,
1244 .setup = i9xx_setup,
1245 .cleanup = i9xx_cleanup,
1246 .write_entry = i965_write_entry,
1247 .dma_mask_size = 36,
1248 .check_flags = i830_check_flags,
1249 .chipset_flush = i9xx_chipset_flush,
1250};
1251static const struct intel_gtt_driver ironlake_gtt_driver = {
1252 .gen = 5,
1253 .is_ironlake = 1,
1254 .setup = i9xx_setup,
1255 .cleanup = i9xx_cleanup,
1256 .write_entry = i965_write_entry,
1257 .dma_mask_size = 36,
1258 .check_flags = i830_check_flags,
1259 .chipset_flush = i9xx_chipset_flush,
1260};
1261
1262/* Table describing the supported Intel GMCH/IGD chips: find_gmch() and
1263 * intel_gmch_probe() match the graphics device's PCI id against gmch_chip_id
1264 * to select the corresponding gtt_driver.
1265 */
1266static const struct intel_gtt_driver_description {
1267 unsigned int gmch_chip_id;
1268 char *name;
1269 const struct intel_gtt_driver *gtt_driver;
1270} intel_gtt_chipsets[] = {
1271 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
1272 &i81x_gtt_driver},
1273 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
1274 &i81x_gtt_driver},
1275 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
1276 &i81x_gtt_driver},
1277 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
1278 &i81x_gtt_driver},
1279 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1280 &i8xx_gtt_driver},
1281 { PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
1282 &i8xx_gtt_driver},
1283 { PCI_DEVICE_ID_INTEL_82854_IG, "854",
1284 &i8xx_gtt_driver},
1285 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
1286 &i8xx_gtt_driver},
1287 { PCI_DEVICE_ID_INTEL_82865_IG, "865",
1288 &i8xx_gtt_driver},
1289 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1290 &i915_gtt_driver },
1291 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1292 &i915_gtt_driver },
1293 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1294 &i915_gtt_driver },
1295 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1296 &i915_gtt_driver },
1297 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1298 &i915_gtt_driver },
1299 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1300 &i915_gtt_driver },
1301 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1302 &i965_gtt_driver },
1303 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1304 &i965_gtt_driver },
1305 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1306 &i965_gtt_driver },
1307 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1308 &i965_gtt_driver },
1309 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1310 &i965_gtt_driver },
1311 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1312 &i965_gtt_driver },
1313 { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1314 &g33_gtt_driver },
1315 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1316 &g33_gtt_driver },
1317 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1318 &g33_gtt_driver },
1319 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1320 &pineview_gtt_driver },
1321 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1322 &pineview_gtt_driver },
1323 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1324 &g4x_gtt_driver },
1325 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1326 &g4x_gtt_driver },
1327 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1328 &g4x_gtt_driver },
1329 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1330 &g4x_gtt_driver },
1331 { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1332 &g4x_gtt_driver },
1333 { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1334 &g4x_gtt_driver },
1335 { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1336 &g4x_gtt_driver },
1337 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1338 "HD Graphics", &ironlake_gtt_driver },
1339 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1340 "HD Graphics", &ironlake_gtt_driver },
1341 { 0, NULL, NULL }
1342};
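
/* The terminating all-NULL entry is what ends the lookup loop in
 * intel_gmch_probe(). */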
1343
1344static int find_gmch(u16 device)
1345{
1346 struct pci_dev *gmch_device;
1347
1348 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1349 if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1350 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1351 device, gmch_device);
1352 }
1353
1354 if (!gmch_device)
1355 return 0;
1356
1357 intel_private.pcidev = gmch_device;
1358 return 1;
1359}
1360
1361int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
1362 struct agp_bridge_data *bridge)
1363{
1364 int i, mask;
1365
1366 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1367 if (gpu_pdev) {
1368 if (gpu_pdev->device ==
1369 intel_gtt_chipsets[i].gmch_chip_id) {
1370 intel_private.pcidev = pci_dev_get(gpu_pdev);
1371 intel_private.driver =
1372 intel_gtt_chipsets[i].gtt_driver;
1373
1374 break;
1375 }
1376 } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1377 intel_private.driver =
1378 intel_gtt_chipsets[i].gtt_driver;
1379 break;
1380 }
1381 }
1382
1383 if (!intel_private.driver)
1384 return 0;
1385
1386#if IS_ENABLED(CONFIG_AGP_INTEL)
1387 if (bridge) {
1388 if (INTEL_GTT_GEN > 1)
1389 return 0;
1390
1391 bridge->driver = &intel_fake_agp_driver;
1392 bridge->dev_private_data = &intel_private;
1393 bridge->dev = bridge_pdev;
1394 }
1395#endif
1396
1397
1398 /*
1399 * Can be called from the fake agp driver but also directly from
1400 * drm/i915.ko. Hence we need to check whether everything is set up
1401 * already.
1402 */
1403 if (intel_private.refcount++)
1404 return 1;
1405
1406 intel_private.bridge_dev = pci_dev_get(bridge_pdev);
1407
1408 dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1409
1410 if (bridge) {
1411 mask = intel_private.driver->dma_mask_size;
1412 if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask)))
1413 dev_err(&intel_private.pcidev->dev,
1414 "set gfx device dma mask %d-bit failed!\n",
1415 mask);
1416 else
1417 dma_set_coherent_mask(&intel_private.pcidev->dev,
1418 DMA_BIT_MASK(mask));
1419 }
1420
1421 if (intel_gtt_init() != 0) {
1422 intel_gmch_remove();
1423
1424 return 0;
1425 }
1426
1427 return 1;
1428}
1429EXPORT_SYMBOL(intel_gmch_probe);
1430
1431void intel_gmch_gtt_get(u64 *gtt_total,
1432 phys_addr_t *mappable_base,
1433 resource_size_t *mappable_end)
1434{
1435 *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
1436 *mappable_base = intel_private.gma_bus_addr;
1437 *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
1438}
1439EXPORT_SYMBOL(intel_gmch_gtt_get);
1440
1441void intel_gmch_gtt_flush(void)
1442{
1443 if (intel_private.driver->chipset_flush)
1444 intel_private.driver->chipset_flush();
1445}
1446EXPORT_SYMBOL(intel_gmch_gtt_flush);
1447
1448void intel_gmch_remove(void)
1449{
1450 if (--intel_private.refcount)
1451 return;
1452
1453 if (intel_private.scratch_page)
1454 intel_gtt_teardown_scratch_page();
1455 if (intel_private.pcidev)
1456 pci_dev_put(intel_private.pcidev);
1457 if (intel_private.bridge_dev)
1458 pci_dev_put(intel_private.bridge_dev);
1459 intel_private.driver = NULL;
1460}
1461EXPORT_SYMBOL(intel_gmch_remove);
1462
1463MODULE_AUTHOR("Dave Jones, Various @Intel");
1464MODULE_LICENSE("GPL and additional rights");
1/*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * a agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphic devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
10 *
11 * With gem this does not make much sense anymore, just needlessly complicates
12 * the code. But as long as the old graphics stack is still support, it's stuck
13 * here.
14 *
15 * /fairy-tale-mode off
16 */
17
18#include <linux/module.h>
19#include <linux/pci.h>
20#include <linux/kernel.h>
21#include <linux/pagemap.h>
22#include <linux/agp_backend.h>
23#include <linux/delay.h>
24#include <asm/smp.h>
25#include "agp.h"
26#include "intel-agp.h"
27#include <drm/intel-gtt.h>
28
29/*
30 * If we have Intel graphics, we're not going to have anything other than
31 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
32 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
33 * Only newer chipsets need to bother with this, of course.
34 */
35#ifdef CONFIG_INTEL_IOMMU
36#define USE_PCI_DMA_API 1
37#else
38#define USE_PCI_DMA_API 0
39#endif
40
41struct intel_gtt_driver {
42 unsigned int gen : 8;
43 unsigned int is_g33 : 1;
44 unsigned int is_pineview : 1;
45 unsigned int is_ironlake : 1;
46 unsigned int has_pgtbl_enable : 1;
47 unsigned int dma_mask_size : 8;
48 /* Chipset specific GTT setup */
49 int (*setup)(void);
50 /* This should undo anything done in ->setup() save the unmapping
51 * of the mmio register file, that's done in the generic code. */
52 void (*cleanup)(void);
53 void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
54 /* Flags is a more or less chipset specific opaque value.
55 * For chipsets that need to support old ums (non-gem) code, this
56 * needs to be identical to the various supported agp memory types! */
57 bool (*check_flags)(unsigned int flags);
58 void (*chipset_flush)(void);
59};
60
61static struct _intel_private {
62 const struct intel_gtt_driver *driver;
63 struct pci_dev *pcidev; /* device one */
64 struct pci_dev *bridge_dev;
65 u8 __iomem *registers;
66 phys_addr_t gtt_phys_addr;
67 u32 PGETBL_save;
68 u32 __iomem *gtt; /* I915G */
69 bool clear_fake_agp; /* on first access via agp, fill with scratch */
70 int num_dcache_entries;
71 void __iomem *i9xx_flush_page;
72 char *i81x_gtt_table;
73 struct resource ifp_resource;
74 int resource_valid;
75 struct page *scratch_page;
76 phys_addr_t scratch_page_dma;
77 int refcount;
78 /* Whether i915 needs to use the dmar apis or not. */
79 unsigned int needs_dmar : 1;
80 phys_addr_t gma_bus_addr;
81 /* Size of memory reserved for graphics by the BIOS */
82 unsigned int stolen_size;
83 /* Total number of gtt entries. */
84 unsigned int gtt_total_entries;
85 /* Part of the gtt that is mappable by the cpu, for those chips where
86 * this is not the full gtt. */
87 unsigned int gtt_mappable_entries;
88} intel_private;
89
90#define INTEL_GTT_GEN intel_private.driver->gen
91#define IS_G33 intel_private.driver->is_g33
92#define IS_PINEVIEW intel_private.driver->is_pineview
93#define IS_IRONLAKE intel_private.driver->is_ironlake
94#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
95
96#if IS_ENABLED(CONFIG_AGP_INTEL)
97static int intel_gtt_map_memory(struct page **pages,
98 unsigned int num_entries,
99 struct sg_table *st)
100{
101 struct scatterlist *sg;
102 int i;
103
104 DBG("try mapping %lu pages\n", (unsigned long)num_entries);
105
106 if (sg_alloc_table(st, num_entries, GFP_KERNEL))
107 goto err;
108
109 for_each_sg(st->sgl, sg, num_entries, i)
110 sg_set_page(sg, pages[i], PAGE_SIZE, 0);
111
112 if (!pci_map_sg(intel_private.pcidev,
113 st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
114 goto err;
115
116 return 0;
117
118err:
119 sg_free_table(st);
120 return -ENOMEM;
121}
122
123static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
124{
125 struct sg_table st;
126 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
127
128 pci_unmap_sg(intel_private.pcidev, sg_list,
129 num_sg, PCI_DMA_BIDIRECTIONAL);
130
131 st.sgl = sg_list;
132 st.orig_nents = st.nents = num_sg;
133
134 sg_free_table(&st);
135}
136
137static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
138{
139 return;
140}
141
142/* Exists to support ARGB cursors */
143static struct page *i8xx_alloc_pages(void)
144{
145 struct page *page;
146
147 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
148 if (page == NULL)
149 return NULL;
150
151 if (set_pages_uc(page, 4) < 0) {
152 set_pages_wb(page, 4);
153 __free_pages(page, 2);
154 return NULL;
155 }
156 atomic_inc(&agp_bridge->current_memory_agp);
157 return page;
158}
159
160static void i8xx_destroy_pages(struct page *page)
161{
162 if (page == NULL)
163 return;
164
165 set_pages_wb(page, 4);
166 __free_pages(page, 2);
167 atomic_dec(&agp_bridge->current_memory_agp);
168}
169#endif
170
171#define I810_GTT_ORDER 4
172static int i810_setup(void)
173{
174 phys_addr_t reg_addr;
175 char *gtt_table;
176
177 /* i81x does not preallocate the gtt. It's always 64kb in size. */
178 gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
179 if (gtt_table == NULL)
180 return -ENOMEM;
181 intel_private.i81x_gtt_table = gtt_table;
182
183 reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
184
185 intel_private.registers = ioremap(reg_addr, KB(64));
186 if (!intel_private.registers)
187 return -ENOMEM;
188
189 writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
190 intel_private.registers+I810_PGETBL_CTL);
191
192 intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
193
194 if ((readl(intel_private.registers+I810_DRAM_CTL)
195 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
196 dev_info(&intel_private.pcidev->dev,
197 "detected 4MB dedicated video ram\n");
198 intel_private.num_dcache_entries = 1024;
199 }
200
201 return 0;
202}
203
204static void i810_cleanup(void)
205{
206 writel(0, intel_private.registers+I810_PGETBL_CTL);
207 free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
208}
209
210#if IS_ENABLED(CONFIG_AGP_INTEL)
211static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
212 int type)
213{
214 int i;
215
216 if ((pg_start + mem->page_count)
217 > intel_private.num_dcache_entries)
218 return -EINVAL;
219
220 if (!mem->is_flushed)
221 global_cache_flush();
222
223 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
224 dma_addr_t addr = i << PAGE_SHIFT;
225 intel_private.driver->write_entry(addr,
226 i, type);
227 }
228 wmb();
229
230 return 0;
231}
232
233/*
234 * The i810/i830 requires a physical address to program its mouse
235 * pointer into hardware.
236 * However the Xserver still writes to it through the agp aperture.
237 */
238static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
239{
240 struct agp_memory *new;
241 struct page *page;
242
243 switch (pg_count) {
244 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
245 break;
246 case 4:
247 /* kludge to get 4 physical pages for ARGB cursor */
248 page = i8xx_alloc_pages();
249 break;
250 default:
251 return NULL;
252 }
253
254 if (page == NULL)
255 return NULL;
256
257 new = agp_create_memory(pg_count);
258 if (new == NULL)
259 return NULL;
260
261 new->pages[0] = page;
262 if (pg_count == 4) {
263 /* kludge to get 4 physical pages for ARGB cursor */
264 new->pages[1] = new->pages[0] + 1;
265 new->pages[2] = new->pages[1] + 1;
266 new->pages[3] = new->pages[2] + 1;
267 }
268 new->page_count = pg_count;
269 new->num_scratch_pages = pg_count;
270 new->type = AGP_PHYS_MEMORY;
271 new->physical = page_to_phys(new->pages[0]);
272 return new;
273}
274
275static void intel_i810_free_by_type(struct agp_memory *curr)
276{
277 agp_free_key(curr->key);
278 if (curr->type == AGP_PHYS_MEMORY) {
279 if (curr->page_count == 4)
280 i8xx_destroy_pages(curr->pages[0]);
281 else {
282 agp_bridge->driver->agp_destroy_page(curr->pages[0],
283 AGP_PAGE_DESTROY_UNMAP);
284 agp_bridge->driver->agp_destroy_page(curr->pages[0],
285 AGP_PAGE_DESTROY_FREE);
286 }
287 agp_free_page_array(curr);
288 }
289 kfree(curr);
290}
291#endif
292
293static int intel_gtt_setup_scratch_page(void)
294{
295 struct page *page;
296 dma_addr_t dma_addr;
297
298 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
299 if (page == NULL)
300 return -ENOMEM;
301 set_pages_uc(page, 1);
302
303 if (intel_private.needs_dmar) {
304 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
305 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
306 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
307 return -EINVAL;
308
309 intel_private.scratch_page_dma = dma_addr;
310 } else
311 intel_private.scratch_page_dma = page_to_phys(page);
312
313 intel_private.scratch_page = page;
314
315 return 0;
316}
317
318static void i810_write_entry(dma_addr_t addr, unsigned int entry,
319 unsigned int flags)
320{
321 u32 pte_flags = I810_PTE_VALID;
322
323 switch (flags) {
324 case AGP_DCACHE_MEMORY:
325 pte_flags |= I810_PTE_LOCAL;
326 break;
327 case AGP_USER_CACHED_MEMORY:
328 pte_flags |= I830_PTE_SYSTEM_CACHED;
329 break;
330 }
331
332 writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
333}
334
335static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
336 {32, 8192, 3},
337 {64, 16384, 4},
338 {128, 32768, 5},
339 {256, 65536, 6},
340 {512, 131072, 7},
341};
342
343static unsigned int intel_gtt_stolen_size(void)
344{
345 u16 gmch_ctrl;
346 u8 rdct;
347 int local = 0;
348 static const int ddt[4] = { 0, 16, 32, 64 };
349 unsigned int stolen_size = 0;
350
351 if (INTEL_GTT_GEN == 1)
352 return 0; /* no stolen mem on i81x */
353
354 pci_read_config_word(intel_private.bridge_dev,
355 I830_GMCH_CTRL, &gmch_ctrl);
356
357 if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
358 intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
359 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
360 case I830_GMCH_GMS_STOLEN_512:
361 stolen_size = KB(512);
362 break;
363 case I830_GMCH_GMS_STOLEN_1024:
364 stolen_size = MB(1);
365 break;
366 case I830_GMCH_GMS_STOLEN_8192:
367 stolen_size = MB(8);
368 break;
369 case I830_GMCH_GMS_LOCAL:
370 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
371 stolen_size = (I830_RDRAM_ND(rdct) + 1) *
372 MB(ddt[I830_RDRAM_DDT(rdct)]);
373 local = 1;
374 break;
375 default:
376 stolen_size = 0;
377 break;
378 }
379 } else {
380 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
381 case I855_GMCH_GMS_STOLEN_1M:
382 stolen_size = MB(1);
383 break;
384 case I855_GMCH_GMS_STOLEN_4M:
385 stolen_size = MB(4);
386 break;
387 case I855_GMCH_GMS_STOLEN_8M:
388 stolen_size = MB(8);
389 break;
390 case I855_GMCH_GMS_STOLEN_16M:
391 stolen_size = MB(16);
392 break;
393 case I855_GMCH_GMS_STOLEN_32M:
394 stolen_size = MB(32);
395 break;
396 case I915_GMCH_GMS_STOLEN_48M:
397 stolen_size = MB(48);
398 break;
399 case I915_GMCH_GMS_STOLEN_64M:
400 stolen_size = MB(64);
401 break;
402 case G33_GMCH_GMS_STOLEN_128M:
403 stolen_size = MB(128);
404 break;
405 case G33_GMCH_GMS_STOLEN_256M:
406 stolen_size = MB(256);
407 break;
408 case INTEL_GMCH_GMS_STOLEN_96M:
409 stolen_size = MB(96);
410 break;
411 case INTEL_GMCH_GMS_STOLEN_160M:
412 stolen_size = MB(160);
413 break;
414 case INTEL_GMCH_GMS_STOLEN_224M:
415 stolen_size = MB(224);
416 break;
417 case INTEL_GMCH_GMS_STOLEN_352M:
418 stolen_size = MB(352);
419 break;
420 default:
421 stolen_size = 0;
422 break;
423 }
424 }
425
426 if (stolen_size > 0) {
427 dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
428 stolen_size / KB(1), local ? "local" : "stolen");
429 } else {
430 dev_info(&intel_private.bridge_dev->dev,
431 "no pre-allocated video memory detected\n");
432 stolen_size = 0;
433 }
434
435 return stolen_size;
436}
437
438static void i965_adjust_pgetbl_size(unsigned int size_flag)
439{
440 u32 pgetbl_ctl, pgetbl_ctl2;
441
442 /* ensure that ppgtt is disabled */
443 pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
444 pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
445 writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
446
447 /* write the new ggtt size */
448 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
449 pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
450 pgetbl_ctl |= size_flag;
451 writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
452}
453
454static unsigned int i965_gtt_total_entries(void)
455{
456 int size;
457 u32 pgetbl_ctl;
458 u16 gmch_ctl;
459
460 pci_read_config_word(intel_private.bridge_dev,
461 I830_GMCH_CTRL, &gmch_ctl);
462
463 if (INTEL_GTT_GEN == 5) {
464 switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
465 case G4x_GMCH_SIZE_1M:
466 case G4x_GMCH_SIZE_VT_1M:
467 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
468 break;
469 case G4x_GMCH_SIZE_VT_1_5M:
470 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
471 break;
472 case G4x_GMCH_SIZE_2M:
473 case G4x_GMCH_SIZE_VT_2M:
474 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
475 break;
476 }
477 }
478
479 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
480
481 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
482 case I965_PGETBL_SIZE_128KB:
483 size = KB(128);
484 break;
485 case I965_PGETBL_SIZE_256KB:
486 size = KB(256);
487 break;
488 case I965_PGETBL_SIZE_512KB:
489 size = KB(512);
490 break;
491 /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
492 case I965_PGETBL_SIZE_1MB:
493 size = KB(1024);
494 break;
495 case I965_PGETBL_SIZE_2MB:
496 size = KB(2048);
497 break;
498 case I965_PGETBL_SIZE_1_5MB:
499 size = KB(1024 + 512);
500 break;
501 default:
502 dev_info(&intel_private.pcidev->dev,
503 "unknown page table size, assuming 512KB\n");
504 size = KB(512);
505 }
506
507 return size/4;
508}
509
510static unsigned int intel_gtt_total_entries(void)
511{
512 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
513 return i965_gtt_total_entries();
514 else {
515 /* On previous hardware, the GTT size was just what was
516 * required to map the aperture.
517 */
518 return intel_private.gtt_mappable_entries;
519 }
520}
521
522static unsigned int intel_gtt_mappable_entries(void)
523{
524 unsigned int aperture_size;
525
526 if (INTEL_GTT_GEN == 1) {
527 u32 smram_miscc;
528
529 pci_read_config_dword(intel_private.bridge_dev,
530 I810_SMRAM_MISCC, &smram_miscc);
531
532 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
533 == I810_GFX_MEM_WIN_32M)
534 aperture_size = MB(32);
535 else
536 aperture_size = MB(64);
537 } else if (INTEL_GTT_GEN == 2) {
538 u16 gmch_ctrl;
539
540 pci_read_config_word(intel_private.bridge_dev,
541 I830_GMCH_CTRL, &gmch_ctrl);
542
543 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
544 aperture_size = MB(64);
545 else
546 aperture_size = MB(128);
547 } else {
548 /* 9xx supports large sizes, just look at the length */
549 aperture_size = pci_resource_len(intel_private.pcidev, 2);
550 }
551
552 return aperture_size >> PAGE_SHIFT;
553}
554
555static void intel_gtt_teardown_scratch_page(void)
556{
557 set_pages_wb(intel_private.scratch_page, 1);
558 if (intel_private.needs_dmar)
559 pci_unmap_page(intel_private.pcidev,
560 intel_private.scratch_page_dma,
561 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
562 __free_page(intel_private.scratch_page);
563}
564
565static void intel_gtt_cleanup(void)
566{
567 intel_private.driver->cleanup();
568
569 iounmap(intel_private.gtt);
570 iounmap(intel_private.registers);
571
572 intel_gtt_teardown_scratch_page();
573}
574
575/* Certain Gen5 chipsets require idling the GPU before
576 * unmapping anything from the GTT when VT-d is enabled.
577 */
578static inline int needs_ilk_vtd_wa(void)
579{
580#ifdef CONFIG_INTEL_IOMMU
581 const unsigned short gpu_devid = intel_private.pcidev->device;
582
583	/* Query intel_iommu to see if we need the workaround; the IOMMU
584	 * driver is presumably loaded before we are.
585	 */
586 if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
587 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
588 intel_iommu_gfx_mapped)
589 return 1;
590#endif
591 return 0;
592}
593
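/* Whether the GTT may safely be mapped write-combining: not on gen2 and
 * older, not on gen6+, and not on Ironlake when the VT-d workaround is
 * needed.
 */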
594static bool intel_gtt_can_wc(void)
595{
596 if (INTEL_GTT_GEN <= 2)
597 return false;
598
599 if (INTEL_GTT_GEN >= 6)
600 return false;
601
602 /* Reports of major corruption with ILK vt'd enabled */
603 if (needs_ilk_vtd_wa())
604 return false;
605
606 return true;
607}
608
609static int intel_gtt_init(void)
610{
611 u32 gtt_map_size;
612 int ret, bar;
613
614 ret = intel_private.driver->setup();
615 if (ret != 0)
616 return ret;
617
618 intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
619 intel_private.gtt_total_entries = intel_gtt_total_entries();
620
621 /* save the PGETBL reg for resume */
622 intel_private.PGETBL_save =
623 readl(intel_private.registers+I810_PGETBL_CTL)
624 & ~I810_PGETBL_ENABLED;
625 /* we only ever restore the register when enabling the PGTBL... */
626 if (HAS_PGTBL_EN)
627 intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
628
629 dev_info(&intel_private.bridge_dev->dev,
630 "detected gtt size: %dK total, %dK mappable\n",
631 intel_private.gtt_total_entries * 4,
632 intel_private.gtt_mappable_entries * 4);
633
634 gtt_map_size = intel_private.gtt_total_entries * 4;
635
636 intel_private.gtt = NULL;
637 if (intel_gtt_can_wc())
638 intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
639 gtt_map_size);
640 if (intel_private.gtt == NULL)
641 intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
642 gtt_map_size);
643 if (intel_private.gtt == NULL) {
644 intel_private.driver->cleanup();
645 iounmap(intel_private.registers);
646 return -ENOMEM;
647 }
648
649#if IS_ENABLED(CONFIG_AGP_INTEL)
650 global_cache_flush(); /* FIXME: ? */
651#endif
652
653 intel_private.stolen_size = intel_gtt_stolen_size();
654
655 intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
656
657 ret = intel_gtt_setup_scratch_page();
658 if (ret != 0) {
659 intel_gtt_cleanup();
660 return ret;
661 }
662
663 if (INTEL_GTT_GEN <= 2)
664 bar = I810_GMADR_BAR;
665 else
666 bar = I915_GMADR_BAR;
667
668 intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
669 return 0;
670}
671
672#if IS_ENABLED(CONFIG_AGP_INTEL)
673static int intel_fake_agp_fetch_size(void)
674{
675 int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
676 unsigned int aper_size;
677 int i;
678
679 aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
680
681 for (i = 0; i < num_sizes; i++) {
682 if (aper_size == intel_fake_agp_sizes[i].size) {
683 agp_bridge->current_size =
684 (void *) (intel_fake_agp_sizes + i);
685 return aper_size;
686 }
687 }
688
689 return 0;
690}
691#endif
692
693static void i830_cleanup(void)
694{
695}
696
697/* The chipset_flush interface needs to get data that has already been
698 * flushed out of the CPU all the way out to main memory, because the GPU
699 * doesn't snoop those buffers.
700 *
701 * The 8xx series doesn't have the same lovely interface for flushing the
702 * chipset write buffers that the later chips do. According to the 865
703 * specs the write buffer is 64 octwords, or 1KB. To push anything sitting
704 * in that buffer out to memory, we forcibly evict the CPU caches with
705 * wbinvd and then kick the flush bit in the HIC register below.
706 */
707static void i830_chipset_flush(void)
708{
709 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
710
711 /* Forcibly evict everything from the CPU write buffers.
712 * clflush appears to be insufficient.
713 */
714 wbinvd_on_all_cpus();
715
716	/* So far we've only seen documentation for this magic bit on 855GM;
717 * we hope it exists for the other gen2 chipsets...
718 *
719 * Also works as advertised on my 845G.
720 */
721 writel(readl(intel_private.registers+I830_HIC) | (1<<31),
722 intel_private.registers+I830_HIC);
723
724 while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
725 if (time_after(jiffies, timeout))
726 break;
727
728 udelay(50);
729 }
730}
731
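/* Gen2 PTE format: page address ored with the valid bit, plus the
 * system-cached (snooped) bit for cached memory.
 */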
732static void i830_write_entry(dma_addr_t addr, unsigned int entry,
733 unsigned int flags)
734{
735 u32 pte_flags = I810_PTE_VALID;
736
737 if (flags == AGP_USER_CACHED_MEMORY)
738 pte_flags |= I830_PTE_SYSTEM_CACHED;
739
740 writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
741}
742
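/* (Re-)enable the GTT: on gen2 make sure the GMCH is enabled, flush the
 * chipset write buffers on gen3+ and restore the saved PGETBL_CTL value.
 * Returns false if the hardware refuses to come up.
 */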
743bool intel_enable_gtt(void)
744{
745 u8 __iomem *reg;
746
747 if (INTEL_GTT_GEN == 2) {
748 u16 gmch_ctrl;
749
750 pci_read_config_word(intel_private.bridge_dev,
751 I830_GMCH_CTRL, &gmch_ctrl);
752 gmch_ctrl |= I830_GMCH_ENABLED;
753 pci_write_config_word(intel_private.bridge_dev,
754 I830_GMCH_CTRL, gmch_ctrl);
755
756 pci_read_config_word(intel_private.bridge_dev,
757 I830_GMCH_CTRL, &gmch_ctrl);
758 if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
759 dev_err(&intel_private.pcidev->dev,
760 "failed to enable the GTT: GMCH_CTRL=%x\n",
761 gmch_ctrl);
762 return false;
763 }
764 }
765
766 /* On the resume path we may be adjusting the PGTBL value, so
767 * be paranoid and flush all chipset write buffers...
768 */
769 if (INTEL_GTT_GEN >= 3)
770 writel(0, intel_private.registers+GFX_FLSH_CNTL);
771
772 reg = intel_private.registers+I810_PGETBL_CTL;
773 writel(intel_private.PGETBL_save, reg);
774 if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
775 dev_err(&intel_private.pcidev->dev,
776 "failed to enable the GTT: PGETBL=%x [expected %x]\n",
777 readl(reg), intel_private.PGETBL_save);
778 return false;
779 }
780
781 if (INTEL_GTT_GEN >= 3)
782 writel(0, intel_private.registers+GFX_FLSH_CNTL);
783
784 return true;
785}
786EXPORT_SYMBOL(intel_enable_gtt);
787
788static int i830_setup(void)
789{
790 phys_addr_t reg_addr;
791
792 reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
793
794 intel_private.registers = ioremap(reg_addr, KB(64));
795 if (!intel_private.registers)
796 return -ENOMEM;
797
798 intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
799
800 return 0;
801}
802
803#if IS_ENABLED(CONFIG_AGP_INTEL)
804static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
805{
806 agp_bridge->gatt_table_real = NULL;
807 agp_bridge->gatt_table = NULL;
808 agp_bridge->gatt_bus_addr = 0;
809
810 return 0;
811}
812
813static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
814{
815 return 0;
816}
817
818static int intel_fake_agp_configure(void)
819{
820 if (!intel_enable_gtt())
821 return -EIO;
822
823 intel_private.clear_fake_agp = true;
824 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
825
826 return 0;
827}
828#endif
829
830static bool i830_check_flags(unsigned int flags)
831{
832 switch (flags) {
833 case 0:
834 case AGP_PHYS_MEMORY:
835 case AGP_USER_CACHED_MEMORY:
836 case AGP_USER_MEMORY:
837 return true;
838 }
839
840 return false;
841}
842
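/* Write a single GTT entry for the given dma address at index pg and, if
 * the chipset needs it, flush the chipset write buffers afterwards.
 */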
843void intel_gtt_insert_page(dma_addr_t addr,
844 unsigned int pg,
845 unsigned int flags)
846{
847 intel_private.driver->write_entry(addr, pg, flags);
848 if (intel_private.driver->chipset_flush)
849 intel_private.driver->chipset_flush();
850}
851EXPORT_SYMBOL(intel_gtt_insert_page);
852
853void intel_gtt_insert_sg_entries(struct sg_table *st,
854 unsigned int pg_start,
855 unsigned int flags)
856{
857 struct scatterlist *sg;
858 unsigned int len, m;
859 int i, j;
860
861 j = pg_start;
862
863 /* sg may merge pages, but we have to separate
864 * per-page addr for GTT */
865 for_each_sg(st->sgl, sg, st->nents, i) {
866 len = sg_dma_len(sg) >> PAGE_SHIFT;
867 for (m = 0; m < len; m++) {
868 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
869 intel_private.driver->write_entry(addr, j, flags);
870 j++;
871 }
872 }
873 wmb();
874}
875EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
876
877#if IS_ENABLED(CONFIG_AGP_INTEL)
878static void intel_gtt_insert_pages(unsigned int first_entry,
879 unsigned int num_entries,
880 struct page **pages,
881 unsigned int flags)
882{
883 int i, j;
884
885 for (i = 0, j = first_entry; i < num_entries; i++, j++) {
886 dma_addr_t addr = page_to_phys(pages[i]);
887 intel_private.driver->write_entry(addr,
888 j, flags);
889 }
890 wmb();
891}
892
893static int intel_fake_agp_insert_entries(struct agp_memory *mem,
894 off_t pg_start, int type)
895{
896 int ret = -EINVAL;
897
898 if (intel_private.clear_fake_agp) {
899 int start = intel_private.stolen_size / PAGE_SIZE;
900 int end = intel_private.gtt_mappable_entries;
901 intel_gtt_clear_range(start, end - start);
902 intel_private.clear_fake_agp = false;
903 }
904
905 if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
906 return i810_insert_dcache_entries(mem, pg_start, type);
907
908 if (mem->page_count == 0)
909 goto out;
910
911 if (pg_start + mem->page_count > intel_private.gtt_total_entries)
912 goto out_err;
913
914 if (type != mem->type)
915 goto out_err;
916
917 if (!intel_private.driver->check_flags(type))
918 goto out_err;
919
920 if (!mem->is_flushed)
921 global_cache_flush();
922
923 if (intel_private.needs_dmar) {
924 struct sg_table st;
925
926 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
927 if (ret != 0)
928 return ret;
929
930 intel_gtt_insert_sg_entries(&st, pg_start, type);
931 mem->sg_list = st.sgl;
932 mem->num_sg = st.nents;
933 } else
934 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
935 type);
936
937out:
938 ret = 0;
939out_err:
940 mem->is_flushed = true;
941 return ret;
942}
943#endif
944
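/* Redirect num_entries GTT entries starting at first_entry to the scratch
 * page, so stray GPU accesses hit a harmless page instead of random
 * memory.
 */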
945void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
946{
947 unsigned int i;
948
949 for (i = first_entry; i < (first_entry + num_entries); i++) {
950 intel_private.driver->write_entry(intel_private.scratch_page_dma,
951 i, 0);
952 }
953 wmb();
954}
955EXPORT_SYMBOL(intel_gtt_clear_range);
956
957#if IS_ENABLED(CONFIG_AGP_INTEL)
958static int intel_fake_agp_remove_entries(struct agp_memory *mem,
959 off_t pg_start, int type)
960{
961 if (mem->page_count == 0)
962 return 0;
963
964 intel_gtt_clear_range(pg_start, mem->page_count);
965
966 if (intel_private.needs_dmar) {
967 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
968 mem->sg_list = NULL;
969 mem->num_sg = 0;
970 }
971
972 return 0;
973}
974
975static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
976 int type)
977{
978 struct agp_memory *new;
979
980 if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
981 if (pg_count != intel_private.num_dcache_entries)
982 return NULL;
983
984 new = agp_create_memory(1);
985 if (new == NULL)
986 return NULL;
987
988 new->type = AGP_DCACHE_MEMORY;
989 new->page_count = pg_count;
990 new->num_scratch_pages = 0;
991 agp_free_page_array(new);
992 return new;
993 }
994 if (type == AGP_PHYS_MEMORY)
995 return alloc_agpphysmem_i8xx(pg_count, type);
996 /* always return NULL for other allocation types for now */
997 return NULL;
998}
999#endif
1000
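/* Allocate a page of PCI memory space on the bridge's bus to serve as the
 * chipset flush page.
 */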
1001static int intel_alloc_chipset_flush_resource(void)
1002{
1003 int ret;
1004 ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1005 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1006 pcibios_align_resource, intel_private.bridge_dev);
1007
1008 return ret;
1009}
1010
1011static void intel_i915_setup_chipset_flush(void)
1012{
1013 int ret;
1014 u32 temp;
1015
1016 pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1017 if (!(temp & 0x1)) {
1018 intel_alloc_chipset_flush_resource();
1019 intel_private.resource_valid = 1;
1020 pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1021 } else {
1022 temp &= ~1;
1023
1024 intel_private.resource_valid = 1;
1025 intel_private.ifp_resource.start = temp;
1026 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1027 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1028		/* some BIOSes reserve this area in a pnp region, some don't */
1029 if (ret)
1030 intel_private.resource_valid = 0;
1031 }
1032}
1033
1034static void intel_i965_g33_setup_chipset_flush(void)
1035{
1036 u32 temp_hi, temp_lo;
1037 int ret;
1038
1039 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
1040 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1041
1042 if (!(temp_lo & 0x1)) {
1043
1044 intel_alloc_chipset_flush_resource();
1045
1046 intel_private.resource_valid = 1;
1047 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
1048 upper_32_bits(intel_private.ifp_resource.start));
1049 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1050 } else {
1051 u64 l64;
1052
1053 temp_lo &= ~0x1;
1054 l64 = ((u64)temp_hi << 32) | temp_lo;
1055
1056 intel_private.resource_valid = 1;
1057 intel_private.ifp_resource.start = l64;
1058 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1059 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1060		/* some BIOSes reserve this area in a pnp region, some don't */
1061 if (ret)
1062 intel_private.resource_valid = 0;
1063 }
1064}
1065
1066static void intel_i9xx_setup_flush(void)
1067{
1068 /* return if already configured */
1069 if (intel_private.ifp_resource.start)
1070 return;
1071
1072 if (INTEL_GTT_GEN == 6)
1073 return;
1074
1075 /* setup a resource for this object */
1076 intel_private.ifp_resource.name = "Intel Flush Page";
1077 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1078
1079 /* Setup chipset flush for 915 */
1080 if (IS_G33 || INTEL_GTT_GEN >= 4) {
1081 intel_i965_g33_setup_chipset_flush();
1082 } else {
1083 intel_i915_setup_chipset_flush();
1084 }
1085
1086 if (intel_private.ifp_resource.start)
1087 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1088 if (!intel_private.i9xx_flush_page)
1089 dev_err(&intel_private.pcidev->dev,
1090 "can't ioremap flush page - no chipset flushing\n");
1091}
1092
1093static void i9xx_cleanup(void)
1094{
1095 if (intel_private.i9xx_flush_page)
1096 iounmap(intel_private.i9xx_flush_page);
1097 if (intel_private.resource_valid)
1098 release_resource(&intel_private.ifp_resource);
1099 intel_private.ifp_resource.start = 0;
1100 intel_private.resource_valid = 0;
1101}
1102
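/* Any write to the flush page makes the chipset flush its write buffers. */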
1103static void i9xx_chipset_flush(void)
1104{
1105 if (intel_private.i9xx_flush_page)
1106 writel(1, intel_private.i9xx_flush_page);
1107}
1108
1109static void i965_write_entry(dma_addr_t addr,
1110 unsigned int entry,
1111 unsigned int flags)
1112{
1113 u32 pte_flags;
1114
1115 pte_flags = I810_PTE_VALID;
1116 if (flags == AGP_USER_CACHED_MEMORY)
1117 pte_flags |= I830_PTE_SYSTEM_CACHED;
1118
1119	/* Fold dma address bits 32-35 down into PTE bits 4-7 */
1120 addr |= (addr >> 28) & 0xf0;
1121 writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
1122}
1123
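/* Map the MMIO register window and locate the GTT within it: gen3 has a
 * dedicated PTE BAR, gen5 places the GTT 2MB into the register BAR, the
 * remaining generations place it at the 512KB mark.
 */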
1124static int i9xx_setup(void)
1125{
1126 phys_addr_t reg_addr;
1127 int size = KB(512);
1128
1129 reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);
1130
1131 intel_private.registers = ioremap(reg_addr, size);
1132 if (!intel_private.registers)
1133 return -ENOMEM;
1134
1135 switch (INTEL_GTT_GEN) {
1136 case 3:
1137 intel_private.gtt_phys_addr =
1138 pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
1139 break;
1140 case 5:
1141 intel_private.gtt_phys_addr = reg_addr + MB(2);
1142 break;
1143 default:
1144 intel_private.gtt_phys_addr = reg_addr + KB(512);
1145 break;
1146 }
1147
1148 intel_i9xx_setup_flush();
1149
1150 return 0;
1151}
1152
1153#if IS_ENABLED(CONFIG_AGP_INTEL)
1154static const struct agp_bridge_driver intel_fake_agp_driver = {
1155 .owner = THIS_MODULE,
1156 .size_type = FIXED_APER_SIZE,
1157 .aperture_sizes = intel_fake_agp_sizes,
1158 .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1159 .configure = intel_fake_agp_configure,
1160 .fetch_size = intel_fake_agp_fetch_size,
1161 .cleanup = intel_gtt_cleanup,
1162 .agp_enable = intel_fake_agp_enable,
1163 .cache_flush = global_cache_flush,
1164 .create_gatt_table = intel_fake_agp_create_gatt_table,
1165 .free_gatt_table = intel_fake_agp_free_gatt_table,
1166 .insert_memory = intel_fake_agp_insert_entries,
1167 .remove_memory = intel_fake_agp_remove_entries,
1168 .alloc_by_type = intel_fake_agp_alloc_by_type,
1169 .free_by_type = intel_i810_free_by_type,
1170 .agp_alloc_page = agp_generic_alloc_page,
1171 .agp_alloc_pages = agp_generic_alloc_pages,
1172 .agp_destroy_page = agp_generic_destroy_page,
1173 .agp_destroy_pages = agp_generic_destroy_pages,
1174};
1175#endif
1176
1177static const struct intel_gtt_driver i81x_gtt_driver = {
1178 .gen = 1,
1179 .has_pgtbl_enable = 1,
1180 .dma_mask_size = 32,
1181 .setup = i810_setup,
1182 .cleanup = i810_cleanup,
1183 .check_flags = i830_check_flags,
1184 .write_entry = i810_write_entry,
1185};
1186static const struct intel_gtt_driver i8xx_gtt_driver = {
1187 .gen = 2,
1188 .has_pgtbl_enable = 1,
1189 .setup = i830_setup,
1190 .cleanup = i830_cleanup,
1191 .write_entry = i830_write_entry,
1192 .dma_mask_size = 32,
1193 .check_flags = i830_check_flags,
1194 .chipset_flush = i830_chipset_flush,
1195};
1196static const struct intel_gtt_driver i915_gtt_driver = {
1197 .gen = 3,
1198 .has_pgtbl_enable = 1,
1199 .setup = i9xx_setup,
1200 .cleanup = i9xx_cleanup,
1201 /* i945 is the last gpu to need phys mem (for overlay and cursors). */
1202 .write_entry = i830_write_entry,
1203 .dma_mask_size = 32,
1204 .check_flags = i830_check_flags,
1205 .chipset_flush = i9xx_chipset_flush,
1206};
1207static const struct intel_gtt_driver g33_gtt_driver = {
1208 .gen = 3,
1209 .is_g33 = 1,
1210 .setup = i9xx_setup,
1211 .cleanup = i9xx_cleanup,
1212 .write_entry = i965_write_entry,
1213 .dma_mask_size = 36,
1214 .check_flags = i830_check_flags,
1215 .chipset_flush = i9xx_chipset_flush,
1216};
1217static const struct intel_gtt_driver pineview_gtt_driver = {
1218 .gen = 3,
1219 .is_pineview = 1, .is_g33 = 1,
1220 .setup = i9xx_setup,
1221 .cleanup = i9xx_cleanup,
1222 .write_entry = i965_write_entry,
1223 .dma_mask_size = 36,
1224 .check_flags = i830_check_flags,
1225 .chipset_flush = i9xx_chipset_flush,
1226};
1227static const struct intel_gtt_driver i965_gtt_driver = {
1228 .gen = 4,
1229 .has_pgtbl_enable = 1,
1230 .setup = i9xx_setup,
1231 .cleanup = i9xx_cleanup,
1232 .write_entry = i965_write_entry,
1233 .dma_mask_size = 36,
1234 .check_flags = i830_check_flags,
1235 .chipset_flush = i9xx_chipset_flush,
1236};
1237static const struct intel_gtt_driver g4x_gtt_driver = {
1238 .gen = 5,
1239 .setup = i9xx_setup,
1240 .cleanup = i9xx_cleanup,
1241 .write_entry = i965_write_entry,
1242 .dma_mask_size = 36,
1243 .check_flags = i830_check_flags,
1244 .chipset_flush = i9xx_chipset_flush,
1245};
1246static const struct intel_gtt_driver ironlake_gtt_driver = {
1247 .gen = 5,
1248 .is_ironlake = 1,
1249 .setup = i9xx_setup,
1250 .cleanup = i9xx_cleanup,
1251 .write_entry = i965_write_entry,
1252 .dma_mask_size = 36,
1253 .check_flags = i830_check_flags,
1254 .chipset_flush = i9xx_chipset_flush,
1255};
1256
1257/* Table describing the supported Intel GMCH chipsets. Each entry maps a
1258 * graphics device id (gmch_chip_id) to the gtt_driver that knows how to
1259 * program that chipset; the probe code scans this table to find a match.
1260 */
1261static const struct intel_gtt_driver_description {
1262 unsigned int gmch_chip_id;
1263 char *name;
1264 const struct intel_gtt_driver *gtt_driver;
1265} intel_gtt_chipsets[] = {
1266 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
1267 &i81x_gtt_driver},
1268 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
1269 &i81x_gtt_driver},
1270 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
1271 &i81x_gtt_driver},
1272 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
1273 &i81x_gtt_driver},
1274 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1275 &i8xx_gtt_driver},
1276 { PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
1277 &i8xx_gtt_driver},
1278 { PCI_DEVICE_ID_INTEL_82854_IG, "854",
1279 &i8xx_gtt_driver},
1280 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
1281 &i8xx_gtt_driver},
1282 { PCI_DEVICE_ID_INTEL_82865_IG, "865",
1283 &i8xx_gtt_driver},
1284 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1285 &i915_gtt_driver },
1286 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1287 &i915_gtt_driver },
1288 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1289 &i915_gtt_driver },
1290 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1291 &i915_gtt_driver },
1292 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1293 &i915_gtt_driver },
1294 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1295 &i915_gtt_driver },
1296 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1297 &i965_gtt_driver },
1298 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1299 &i965_gtt_driver },
1300 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1301 &i965_gtt_driver },
1302 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1303 &i965_gtt_driver },
1304 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1305 &i965_gtt_driver },
1306 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1307 &i965_gtt_driver },
1308 { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1309 &g33_gtt_driver },
1310 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1311 &g33_gtt_driver },
1312 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1313 &g33_gtt_driver },
1314 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1315 &pineview_gtt_driver },
1316 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1317 &pineview_gtt_driver },
1318 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1319 &g4x_gtt_driver },
1320 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1321 &g4x_gtt_driver },
1322 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1323 &g4x_gtt_driver },
1324 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1325 &g4x_gtt_driver },
1326 { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1327 &g4x_gtt_driver },
1328 { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1329 &g4x_gtt_driver },
1330 { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1331 &g4x_gtt_driver },
1332 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1333 "HD Graphics", &ironlake_gtt_driver },
1334 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1335 "HD Graphics", &ironlake_gtt_driver },
1336 { 0, NULL, NULL }
1337};
1338
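/* Look up the integrated graphics PCI device by device id, skipping a
 * first match that is not function 0. On success a reference to the
 * device is kept in intel_private.pcidev.
 */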
1339static int find_gmch(u16 device)
1340{
1341 struct pci_dev *gmch_device;
1342
1343 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1344 if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1345 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1346 device, gmch_device);
1347 }
1348
1349 if (!gmch_device)
1350 return 0;
1351
1352 intel_private.pcidev = gmch_device;
1353 return 1;
1354}
1355
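/* Probe entry point, called either by the fake agp driver (with bridge
 * set) or directly by drm/i915 (with gpu_pdev set). Returns 1 on success,
 * 0 if the chipset is not recognised or cannot be set up.
 */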
1356int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
1357 struct agp_bridge_data *bridge)
1358{
1359 int i, mask;
1360
1361 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1362 if (gpu_pdev) {
1363 if (gpu_pdev->device ==
1364 intel_gtt_chipsets[i].gmch_chip_id) {
1365 intel_private.pcidev = pci_dev_get(gpu_pdev);
1366 intel_private.driver =
1367 intel_gtt_chipsets[i].gtt_driver;
1368
1369 break;
1370 }
1371 } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1372 intel_private.driver =
1373 intel_gtt_chipsets[i].gtt_driver;
1374 break;
1375 }
1376 }
1377
1378 if (!intel_private.driver)
1379 return 0;
1380
1381#if IS_ENABLED(CONFIG_AGP_INTEL)
1382 if (bridge) {
1383 if (INTEL_GTT_GEN > 1)
1384 return 0;
1385
1386 bridge->driver = &intel_fake_agp_driver;
1387 bridge->dev_private_data = &intel_private;
1388 bridge->dev = bridge_pdev;
1389 }
1390#endif
1391
1393 /*
1394 * Can be called from the fake agp driver but also directly from
1395 * drm/i915.ko. Hence we need to check whether everything is set up
1396 * already.
1397 */
1398 if (intel_private.refcount++)
1399 return 1;
1400
1401 intel_private.bridge_dev = pci_dev_get(bridge_pdev);
1402
1403 dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1404
1405 mask = intel_private.driver->dma_mask_size;
1406 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1407 dev_err(&intel_private.pcidev->dev,
1408 "set gfx device dma mask %d-bit failed!\n", mask);
1409 else
1410 pci_set_consistent_dma_mask(intel_private.pcidev,
1411 DMA_BIT_MASK(mask));
1412
1413 if (intel_gtt_init() != 0) {
1414 intel_gmch_remove();
1415
1416 return 0;
1417 }
1418
1419 return 1;
1420}
1421EXPORT_SYMBOL(intel_gmch_probe);
1422
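/* Report the GTT geometry: total and CPU-mappable sizes in bytes, the bus
 * address of the aperture and the amount of BIOS-stolen memory.
 */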
1423void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
1424 phys_addr_t *mappable_base, u64 *mappable_end)
1425{
1426 *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
1427 *stolen_size = intel_private.stolen_size;
1428 *mappable_base = intel_private.gma_bus_addr;
1429 *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
1430}
1431EXPORT_SYMBOL(intel_gtt_get);
1432
1433void intel_gtt_chipset_flush(void)
1434{
1435 if (intel_private.driver->chipset_flush)
1436 intel_private.driver->chipset_flush();
1437}
1438EXPORT_SYMBOL(intel_gtt_chipset_flush);
1439
1440void intel_gmch_remove(void)
1441{
1442 if (--intel_private.refcount)
1443 return;
1444
1445 if (intel_private.scratch_page)
1446 intel_gtt_teardown_scratch_page();
1447 if (intel_private.pcidev)
1448 pci_dev_put(intel_private.pcidev);
1449 if (intel_private.bridge_dev)
1450 pci_dev_put(intel_private.bridge_dev);
1451 intel_private.driver = NULL;
1452}
1453EXPORT_SYMBOL(intel_gmch_remove);
1454
1455MODULE_AUTHOR("Dave Jones, Various @Intel");
1456MODULE_LICENSE("GPL and additional rights");