v3.15
 
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* GART present only on Fam15h up to model 0fh */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
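
For orientation, here is a minimal sketch of how another built-in kernel component might consume the v3.15 interface above, using only helpers visible in this file and declared in <asm/amd_nb.h> (amd_cache_northbridges(), amd_nb_num(), node_to_amd_nb(), amd_nb_has_feature()). The function name and the initcall hook are illustrative, not part of amd_nb.c:

/* Hypothetical caller, not part of amd_nb.c: enumerate the cached northbridges. */
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/amd_nb.h>

static __init int example_scan_amd_nbs(void)
{
	u16 i;

	/* Populates amd_northbridges on first use; returns 0 if already cached. */
	if (amd_cache_northbridges() < 0 || !amd_nb_num())
		return -ENODEV;

	for (i = 0; i < amd_nb_num(); i++) {
		struct amd_northbridge *nb = node_to_amd_nb(i);

		pr_info("nb %u: misc %s, link %s\n", i,
			nb->misc ? pci_name(nb->misc) : "none",
			nb->link ? pci_name(nb->link) : "none");
	}

	if (amd_nb_has_feature(AMD_NB_GART))
		pr_info("GART flush words are maintained by amd_nb\n");

	return 0;
}
/* Would run after init_amd_nbs(), e.g. via device_initcall(example_scan_amd_nbs); */
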
v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT	0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4	0x1654

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);

int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
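
Similarly, a minimal sketch of reading a System Management Network register on every node through the v5.9 amd_smn_read() helper shown above; the SMN offset passed in is a placeholder chosen by the caller, not a register documented in this file:

/* Hypothetical caller, not part of amd_nb.c: dump one SMN register per node. */
#include <linux/kernel.h>
#include <asm/amd_nb.h>

static void example_dump_smn_reg(u32 smn_offset)
{
	u16 node;

	for (node = 0; node < amd_nb_num(); node++) {
		u32 val;

		/* amd_smn_read() serializes on smn_mutex and logs its own errors. */
		if (amd_smn_read(node, smn_offset, &val))
			continue;

		pr_info("node %u: SMN 0x%x = 0x%08x\n", node, smn_offset, val);
	}
}
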