v3.5.6 - arch/x86/kernel/amd_nb.c
 
/*
 * Shared support code for AMD K8 northbridges and derivates.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}
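
/*
 * next_northbridge() above walks the entire PCI device list via
 * pci_get_device() and returns the next device that matches the given ID
 * table, so repeated calls enumerate all northbridge devices of one kind
 * in bus order.
 */
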
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush*/
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);

v5.9 - arch/x86/kernel/amd_nb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivates.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT	0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4	0x1654

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

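/*
 * SMN accesses below go through an index/data register pair on the root
 * device: the SMN address is written to config offset 0x60 and the data is
 * then read from or written to offset 0x64, with smn_mutex keeping the
 * two-step sequence from being interleaved with other users of the pair.
 */
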
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
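/*
 * As assembled below, the FICAA value packs (this is a reading of the code
 * in this file, not a quote from the hardware documentation):
 *   bit  0      - enable the indirect/instance access
 *   bits 2-9    - dword-aligned register offset (reg & 0x3FC)
 *   bits 11-13  - PCI function number
 *   bits 16-23  - Instance Id of the target device
 */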
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);

int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

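	/*
	 * MMCONFIG/ECAM decodes 4 KiB of config space per function, i.e.
	 * 1 MiB (2^20 bytes) per bus, so a window covering 2^segn_busn_bits
	 * buses spans segn_busn_bits + 20 address bits, as computed below.
	 */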
	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

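/*
 * amd_get_subcaches() above and amd_set_subcaches() below both treat config
 * register 0x1d4 on the link device as holding one 4-bit subcache-enable
 * field per core: the getter extracts this CPU's field, and the setter
 * rewrites it while also managing the BAN-mode bits (0x180000) in register
 * 0x1b8 on the misc device.
 */
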
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
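	/*
	 * The first loop below sets bit 0 of each node's cached flush word in
	 * config register 0x9c to kick off a GART flush; the second loop then
	 * polls that register until the hardware clears bit 0 again, which is
	 * taken as completion of the flush.
	 */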
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush*/
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
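/*
 * As implemented below, bit 2 of config register 0x164 on the first node's
 * link device is taken as the sign that firmware has already applied the
 * workaround; in that case the IC_CFG MSR bits are left untouched.
 */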
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);