v4.6

/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
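
In v4.6 the enumeration API is exported, so dependent drivers (EDAC, for instance) trigger the caching themselves before touching any node. A minimal, hypothetical consumer might look like the following sketch; the function name is invented purely for illustration.

/*
 * Hypothetical consumer (not part of the original file): populate the
 * northbridge cache, then walk the per-node misc (function 3) devices.
 */
static int example_walk_northbridges(void)
{
	u16 i;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *misc = node_to_amd_nb(i)->misc;

		if (misc)
			pr_info("node %u: NB misc device %s\n",
				i, pci_name(misc));
	}

	return 0;
}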

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
v6.9.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT		0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT		0x14a4
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
#define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb
#define PCI_DEVICE_ID_AMD_MI300_ROOT		0x14f8

#define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4	0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4	0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4	0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4		0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4	0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4	0x12c4
#define PCI_DEVICE_ID_AMD_MI200_DF_F4		0x14d4
#define PCI_DEVICE_ID_AMD_MI300_DF_F4		0x152c

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

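amd_smn_read() and amd_smn_write() are the exported entry points for System Management Network access, serialized through the root device's 0x60/0x64 index/data pair. A minimal usage sketch follows; the SMN address 0x50000 is an invented placeholder, not a documented register.

/*
 * Hypothetical caller (not part of the original file): read a 32-bit
 * register from node 0 over the SMN. The address is made up purely
 * for illustration.
 */
static void example_smn_dump(void)
{
	u32 val;

	if (amd_smn_read(0, 0x50000, &val))
		pr_warn("SMN read failed\n");
	else
		pr_info("SMN 0x50000: 0x%08x\n", val);
}
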
static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}
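
A hedged sketch of a caller: the function fills in the supplied struct resource and returns it only when the MMCONFIG (ECAM) window is enabled; the window spans one megabyte per decoded bus number, as computed from segn_busn_bits above.

/*
 * Illustrative caller (not part of the original file): log the
 * MMCONFIG aperture reported by the MSR, if it is enabled.
 */
static void example_report_mmconfig(void)
{
	struct resource res = { .name = "MMCONFIG" };

	if (amd_get_mmconfig_range(&res))
		pr_info("MMCONFIG window: %pR\n", &res);
}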

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
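
A usage sketch, assuming a caller that wants to temporarily restrict CPU 0's compute unit to a single L3 subcache (the 4-bit mask selects which of the four subcaches the unit may allocate into) and restore the previous mask afterwards; only the exported API above is used.

/*
 * Hypothetical caller (not part of the original file): limit CPU 0
 * to subcache 0, then restore its previous subcache mask.
 */
static int example_restrict_l3(void)
{
	int old = amd_get_subcaches(0);
	int err;

	err = amd_set_subcaches(0, 0x1);	/* allow subcache 0 only */
	if (err)
		return err;

	/* ... run the cache-sensitive workload here ... */

	return amd_set_subcaches(0, old);
}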

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
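
A hedged sketch of how a GART user might drive this export: after rewriting GART page-table entries, make the writes globally visible and then ask every northbridge to flush its GART TLB. The barrier placement is illustrative, not taken from this file.

/*
 * Hypothetical caller (not part of the original file): issue a
 * GART TLB flush after updating GART PTEs.
 */
static void example_gart_ptes_updated(void)
{
	/* make the PTE updates visible before triggering the flush */
	wmb();
	amd_flush_garts();
}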

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);