/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

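/*
 * Walk the global PCI device list and return the next device matching
 * @ids, or NULL when the list is exhausted. pci_get_device() drops the
 * reference on the passed-in device and holds one on the returned
 * device, so callers can keep feeding the result back in.
 */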
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

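/*
 * Count the northbridge misc devices in a first scan, then allocate
 * the amd_northbridges array and cache the misc and link PCI devices
 * for each node in a second scan. Later calls see amd_nb_num() != 0
 * and return immediately.
 */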
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor, but as far as I can figure out
 * they're useless anyway.
 */
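/*
 * The argument packs the PCI vendor ID in bits 15:0 and the device ID
 * in bits 31:16, i.e. the layout of the first config-space dword.
 */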
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL << (segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk(KERN_NOTICE "nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);

/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

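/* Prefix all pr_*() output from this file with the module name. */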
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

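	/*
	 * Only set the GART flag on hardware that actually has a GART;
	 * the per-family check (some families, e.g. 0x11, have none)
	 * lives in amd_gart_present().
	 */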
	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/* Check for L3 cache presence. */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor, but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

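/*
 * Derive the MMCONFIG aperture from the Fam10h+ MMIO configuration
 * base MSR: the enable bit gates the feature, the base field gives the
 * physical start, and the bus-range field gives log2 of the number of
 * buses, at 1MB of config space per bus (hence the "+ 20" below).
 */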
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL << (segn_busn_bits + 20)) - 1;
	return res;
}

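/*
 * Return the 4-bit L3 subcache-enable mask for @cpu's core, taken from
 * register 0x1d4 of the node's link PCI function (four bits per core).
 */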
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

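/*
 * Set the 4-bit L3 subcache-enable mask for @cpu's core. The reset
 * state of the partitioning register and of BAN mode (bits 20:19 of
 * misc register 0x1b8) is captured on first use; BAN mode is dropped
 * while any subcache is disabled and restored once the partitioning
 * returns to its reset state.
 */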
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

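/*
 * Cache each northbridge's GART flush word (misc register 0x9c) at
 * init time; amd_flush_garts() later writes this value back with bit 0
 * set to trigger a flush. If the cache cannot be allocated, the GART
 * feature flag is cleared.
 */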
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

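/*
 * Flush the GART TLB on every northbridge: write each cached flush
 * word with bit 0 set, then poll until the hardware clears bit 0 to
 * signal completion.
 */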
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);