/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);
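
/*
 * Illustrative consumer pattern (a sketch, not a call site in this file):
 * size per-node state from amd_nb_num() and look nodes up with
 * node_to_amd_nb(), which returns NULL for an out-of-range node. The
 * register 0x9c read here is the GART flush word, as amd_cache_gart()
 * does below:
 *
 *	u16 node;
 *	u32 val;
 *
 *	for (node = 0; node < amd_nb_num(); node++) {
 *		struct amd_northbridge *nb = node_to_amd_nb(node);
 *
 *		if (nb && nb->misc)
 *			pci_read_config_dword(nb->misc, 0x9c, &val);
 *	}
 */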

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
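
/*
 * Illustrative read-modify-write through the helpers above (a sketch;
 * the SMN address 0x50000 is a placeholder, not a real register):
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, 0x50000, &val)) {
 *		val |= BIT(0);
 *		amd_smn_write(0, 0x50000, val);
 *	}
 *
 * Both helpers return 0 on success and nonzero on failure, and
 * serialize on smn_mutex internally.
 */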

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
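
/*
 * FICAA bit layout as constructed below: bit 0 set = indirect access
 * enable, bits [9:2] = dword-aligned register offset (reg & 0x3FC),
 * bits [13:11] = PCI function, bits [23:16] = instance ID.
 */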
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);
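
/*
 * Illustrative call (a sketch; the function, register offset and
 * instance ID below are placeholder values):
 *
 *	u32 lo;
 *
 *	if (!amd_df_indirect_read(0, 0, 0x44, 0, &lo))
 *		pr_debug("DF reg 0x44, instance 0: 0x%x\n", lo);
 */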

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *root, *misc, *link;

	if (amd_northbridges.num)
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (!i)
		return -ENODEV;

	nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = root = NULL;
	for (i = 0; i != amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, amd_root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

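	/*
	 * Each decoded bus number spans 1 MiB of MMCONFIG space
	 * (32 devices * 8 functions * 4 KiB of config space per
	 * function), so the window covers (1 << segn_busn_bits)
	 * buses * (1 << 20) bytes.
	 */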
	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
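
/*
 * Illustrative call (a sketch; 'cpu' is a placeholder): keep only
 * subcaches 0 and 1 enabled for the compute unit containing 'cpu'.
 * Each compute unit owns a 4-bit enable field in register 0x1d4:
 *
 *	if (amd_set_subcaches(cpu, 0x3))
 *		pr_warn("L3 partitioning unsupported or bad mask\n");
 */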

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);