v3.5.6
 
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
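
A hedged caller sketch, not part of the file: the functions used below are real exports from this listing, but the init function wrapping them is hypothetical.

/* Hypothetical consumer, sketched against this file's exports. */
static int __init example_nb_user_init(void)
{
	if (amd_cache_northbridges() < 0 || !amd_nb_num())
		return -ENODEV;		/* no AMD northbridges found */

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		pr_info("L3 partitioning on %u node(s)\n", amd_nb_num());

	return 0;
}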

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}
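
A worked decode, for reference (values illustrative): each bus number consumes 1 MiB (2^20 bytes) of MMCONFIG space, so a BUSRANGE field of 8 yields a window of 1ULL << (8 + 20) = 256 MiB covering 256 buses, which is what the `segn_busn_bits + 20` shift above computes.

/* Hypothetical usage sketch; the resource name is made up. */
struct resource res = { .name = "pci_mmconfig", .flags = IORESOURCE_MEM };

if (amd_get_mmconfig_range(&res))
	pr_info("MMCONFIG window %pR\n", &res);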

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

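A brief worked example of the mask layout, deduced from the code above (the CPU number is illustrative): register 0x1d4 on the link device carries one 4-bit subcache-enable field per compute unit, so compute unit cuid owns bits 4*cuid through 4*cuid+3.

/* Illustrative: keep only subcaches 0 and 1 for the compute unit of
 * CPU 4. If that CPU's cuid is 1, this writes mask 0x3 into bits 4-7. */
if (!amd_set_subcaches(4, 0x3))
	pr_info("CPU 4 subcache mask now 0x%x\n", amd_get_subcaches(4));
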
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);

v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT	0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4	0x1654
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

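A minimal usage sketch of these accessors (the loop is illustrative, not from the file): callers bound node indices by amd_nb_num() and must handle the NULL that node_to_amd_nb() returns for out-of-range nodes.

/* Illustrative: walk every cached northbridge. */
u16 i;

for (i = 0; i < amd_nb_num(); i++) {
	struct amd_northbridge *nb = node_to_amd_nb(i);

	if (nb && nb->misc)
		pci_info(nb->misc, "northbridge %u cached\n", i);
}
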
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

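A hedged usage sketch (the SMN offset below is a made-up placeholder, not a documented register; real consumers such as drivers/hwmon/k10temp.c pass documented SMN offsets):

/* Illustrative only: EXAMPLE_SMN_REG is hypothetical. */
#define EXAMPLE_SMN_REG	0x12345

u32 val;

if (!amd_smn_read(0, EXAMPLE_SMN_REG, &val))	/* node 0 */
	pr_info("SMN 0x%x = 0x%08x\n", EXAMPLE_SMN_REG, val);
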
/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);

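To make the FICAA encoding concrete, a worked example with illustrative values: reading function 0, register 0x50, instance 5 on node 0 builds ficaa = 1 | (0x50 & 0x3FC) | ((0 & 0x7) << 11) | (5 << 16) = 0x50051.

/* Illustrative only: the func/reg/instance values are made up. */
u32 lo;

if (!amd_df_indirect_read(0, 0, 0x50, 5, &lo))
	pr_info("DF F0x50, instance 5: 0x%08x\n", lo);	/* FICAA was 0x50051 */
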
int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);