// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT	0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4	0x1654

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);
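
/*
 * Usage sketch (illustrative only, not part of this file): once the
 * northbridges are cached, callers can enumerate them via amd_nb_num()
 * and node_to_amd_nb().
 *
 *	u16 i;
 *
 *	for (i = 0; i < amd_nb_num(); i++) {
 *		struct amd_northbridge *nb = node_to_amd_nb(i);
 *
 *		if (nb && nb->misc)
 *			pci_info(nb->misc, "node %u misc device found\n", i);
 *	}
 */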

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
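
/*
 * Usage sketch (illustrative only; 0x12345 is a placeholder SMN address,
 * not a documented register):
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, 0x12345, &val))
 *		pr_info("SMN 0x12345: 0x%x\n", val);
 *
 * Both helpers funnel through the 0x60/0x64 index/data register pair on
 * the node's root device, serialized by smn_mutex.
 */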

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);
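
/*
 * Worked example (illustrative values, not a documented register):
 * reading register offset 0x104 of DF function 0 on instance 2 builds
 *
 *	ficaa = 1			(enable instance access)
 *	      | (0x104 & 0x3FC)		(dword-aligned register offset)
 *	      | ((0 & 0x7) << 11)	(function number)
 *	      | (2 << 16)		(instance ID)
 *	      = 0x20105
 *
 * which is written to the FICAA register at offset 0x5C of the node's
 * F4 device before FICAD LO is read back from offset 0x98.
 */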

int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
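
/*
 * A minimal caller sketch (illustrative only): users typically do
 *
 *	if (amd_cache_northbridges() < 0)
 *		return -ENODEV;
 *
 * before dereferencing node_to_amd_nb(). init_amd_nbs() below also runs
 * this at fs_initcall time, and repeat calls are cheap since the
 * function returns early once amd_northbridges.num is set.
 */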

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}
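
/*
 * Usage sketch (illustrative only): the caller owns the storage and
 * checks for NULL.
 *
 *	struct resource res = { .name = "MMCONFIG", };
 *
 *	if (amd_get_mmconfig_range(&res))
 *		pr_info("MMCONFIG aperture: %pR\n", &res);
 */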

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
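
/*
 * Usage sketch (illustrative only): restrict CPU 0's compute unit to
 * subcaches 0 and 1, then read the mask back. Only meaningful where
 * AMD_NB_L3_PARTITIONING is set (family 0x15).
 *
 *	if (!amd_set_subcaches(0, 0x3))
 *		pr_info("subcache mask: 0x%x\n", amd_get_subcaches(0));
 */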

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);