v3.5.6
 
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

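/*
 * Walk the global PCI device list and return the next device matching
 * one of @ids; pass the previous hit (or NULL to start the scan).
 */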
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

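	/* allocate one amd_northbridge entry per misc device counted above */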
	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

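	/* the window spans 2^segn_busn_bits buses at 1 MiB of config space per bus */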
	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

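	/* each compute unit owns a 4-bit field of subcache enable bits */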
	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

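	/* shift the new enable bits into this compute unit's 4-bit field */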
	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

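/*
 * The cached flush words are written back with bit 0 set to request a
 * GART TLB flush; the hardware clears that bit once the flush is done.
 */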
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyway
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT	0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT	0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT	0x14a4
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT	0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT	0x14e8
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4 0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4	0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4 0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT	0x14b5
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4 0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

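/*
 * SMN registers are reached through an index/data pair in the root
 * device's PCI config space: the target address goes to offset 0x60
 * and the data is read or written at offset 0x64, serialized by
 * smn_mutex.
 */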
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
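
/*
 * Usage sketch: read-modify-write an SMN register on node 0. Both calls
 * return 0 on success; SMN_SOME_REG is a placeholder address, not a
 * real register:
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, SMN_SOME_REG, &val))
 *		amd_smn_write(0, SMN_SOME_REG, val | BIT(0));
 */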

static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

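	/* each core owns a 4-bit field of subcache enable bits */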
	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

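	/* run the workaround on every CPU; the final 0 means don't wait */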
	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);