v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Shared support code for AMD K8 northbridges and derivatives.
  4 * Copyright 2006 Andi Kleen, SUSE Labs.
  5 */
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8
  9#include <linux/types.h>
 10#include <linux/slab.h>
 11#include <linux/init.h>
 12#include <linux/errno.h>
 13#include <linux/export.h>
 14#include <linux/spinlock.h>
 15#include <linux/pci_ids.h>
 16#include <asm/amd_nb.h>
 17
 18#define PCI_DEVICE_ID_AMD_17H_ROOT		0x1450
 19#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
 20#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
 21#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
 22#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT		0x14b5
 23#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT		0x14a4
 24#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
 25#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
 26#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
 27#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
 28#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
 29#define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb
 30#define PCI_DEVICE_ID_AMD_MI300_ROOT		0x14f8
 31
 32#define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
 33#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
 34#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
 35#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4	0x144c
 36#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4	0x1444
 37#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4	0x1728
 38#define PCI_DEVICE_ID_AMD_19H_DF_F4		0x1654
 39#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4	0x14b1
 40#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
 41#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
 42#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
 43#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
 44#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc
 45#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4	0x12c4
 46#define PCI_DEVICE_ID_AMD_MI200_DF_F4		0x14d4
 47#define PCI_DEVICE_ID_AMD_MI300_DF_F4		0x152c
 48
 49/* Protect the PCI config register pairs used for SMN. */
 50static DEFINE_MUTEX(smn_mutex);
 51
 52static u32 *flush_words;
 53
 54static const struct pci_device_id amd_root_ids[] = {
 55	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
 56	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
 57	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
 58	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
 59	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
 60	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
 61	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
 62	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
 63	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
 64	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
 65	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
 66	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
 67	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) },
 68	{}
 69};
 70
 71#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704
 72
 73static const struct pci_device_id amd_nb_misc_ids[] = {
 74	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 75	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 76	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 77	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
 78	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
 79	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
 80	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 81	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
 82	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
 83	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
 84	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
 85	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
 86	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
 87	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 88	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
 89	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
 90	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
 91	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
 92	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
 93	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
 94	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
 95	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
 96	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
 97	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
 98	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
 99	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
100	{}
101};
102
103static const struct pci_device_id amd_nb_link_ids[] = {
104	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
105	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
106	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
107	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
108	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
109	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
110	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
111	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
112	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
113	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
114	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
115	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
116	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
117	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
118	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
119	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
120	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
121	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
122	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
123	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
124	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
125	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) },
126	{}
127};
128
129static const struct pci_device_id hygon_root_ids[] = {
130	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
131	{}
132};
133
134static const struct pci_device_id hygon_nb_misc_ids[] = {
135	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
136	{}
137};
138
139static const struct pci_device_id hygon_nb_link_ids[] = {
140	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
141	{}
142};
143
144const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
145	{ 0x00, 0x18, 0x20 },
146	{ 0xff, 0x00, 0x20 },
147	{ 0xfe, 0x00, 0x20 },
148	{ }
149};
150
151static struct amd_northbridge_info amd_northbridges;
152
153u16 amd_nb_num(void)
154{
155	return amd_northbridges.num;
156}
157EXPORT_SYMBOL_GPL(amd_nb_num);
158
159bool amd_nb_has_feature(unsigned int feature)
160{
161	return ((amd_northbridges.flags & feature) == feature);
162}
163EXPORT_SYMBOL_GPL(amd_nb_has_feature);
164
165struct amd_northbridge *node_to_amd_nb(int node)
166{
167	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
168}
169EXPORT_SYMBOL_GPL(node_to_amd_nb);
170
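/*
 * Walk the global PCI device list starting after @dev and return the
 * next device matching one of the entries in @ids, or NULL when the
 * list is exhausted.
 */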
171static struct pci_dev *next_northbridge(struct pci_dev *dev,
172					const struct pci_device_id *ids)
173{
174	do {
175		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
176		if (!dev)
177			break;
178	} while (!pci_match_id(ids, dev));
179	return dev;
180}
181
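/*
 * SMN accesses go through an index/data register pair in the root
 * device's PCI config space: the target SMN address is written to
 * offset 0x60 and the data is then read from or written to offset
 * 0x64, serialized by smn_mutex.
 */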
182static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
183{
184	struct pci_dev *root;
185	int err = -ENODEV;
186
187	if (node >= amd_northbridges.num)
188		goto out;
189
190	root = node_to_amd_nb(node)->root;
191	if (!root)
192		goto out;
193
194	mutex_lock(&smn_mutex);
195
196	err = pci_write_config_dword(root, 0x60, address);
197	if (err) {
198		pr_warn("Error programming SMN address 0x%x.\n", address);
199		goto out_unlock;
200	}
201
202	err = (write ? pci_write_config_dword(root, 0x64, *value)
203		     : pci_read_config_dword(root, 0x64, value));
204	if (err)
205		pr_warn("Error %s SMN address 0x%x.\n",
206			(write ? "writing to" : "reading from"), address);
207
208out_unlock:
209	mutex_unlock(&smn_mutex);
210
211out:
212	return err;
213}
214
215int amd_smn_read(u16 node, u32 address, u32 *value)
216{
217	return __amd_smn_rw(node, address, value, false);
218}
219EXPORT_SYMBOL_GPL(amd_smn_read);
220
221int amd_smn_write(u16 node, u32 address, u32 value)
222{
223	return __amd_smn_rw(node, address, &value, true);
224}
225EXPORT_SYMBOL_GPL(amd_smn_write);
226
227
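/*
 * Enumerate the root, misc and link PCI devices once, allocate one
 * amd_northbridge entry per misc device and record the feature flags
 * (GART, L3 index disable, L3 partitioning) for this CPU family.
 */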
228static int amd_cache_northbridges(void)
229{
230	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
231	const struct pci_device_id *link_ids = amd_nb_link_ids;
232	const struct pci_device_id *root_ids = amd_root_ids;
233	struct pci_dev *root, *misc, *link;
234	struct amd_northbridge *nb;
235	u16 roots_per_misc = 0;
236	u16 misc_count = 0;
237	u16 root_count = 0;
238	u16 i, j;
239
240	if (amd_northbridges.num)
241		return 0;
242
243	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
244		root_ids = hygon_root_ids;
245		misc_ids = hygon_nb_misc_ids;
246		link_ids = hygon_nb_link_ids;
247	}
248
249	misc = NULL;
250	while ((misc = next_northbridge(misc, misc_ids)))
251		misc_count++;
252
253	if (!misc_count)
254		return -ENODEV;
255
256	root = NULL;
257	while ((root = next_northbridge(root, root_ids)))
258		root_count++;
259
260	if (root_count) {
261		roots_per_misc = root_count / misc_count;
262
263		/*
264		 * There should be _exactly_ N roots for each DF/SMN
265		 * interface.
266		 */
267		if (!roots_per_misc || (root_count % roots_per_misc)) {
268			pr_info("Unsupported AMD DF/PCI configuration found\n");
269			return -ENODEV;
270		}
271	}
272
273	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
274	if (!nb)
275		return -ENOMEM;
276
277	amd_northbridges.nb = nb;
278	amd_northbridges.num = misc_count;
279
280	link = misc = root = NULL;
281	for (i = 0; i < amd_northbridges.num; i++) {
282		node_to_amd_nb(i)->root = root =
283			next_northbridge(root, root_ids);
284		node_to_amd_nb(i)->misc = misc =
285			next_northbridge(misc, misc_ids);
286		node_to_amd_nb(i)->link = link =
287			next_northbridge(link, link_ids);
288
289		/*
290		 * If there are more PCI root devices than data fabric/
291		 * system management network interfaces, then the (N)
292		 * PCI roots per DF/SMN interface are functionally the
293		 * same (for DF/SMN access) and N-1 are redundant.  N-1
294		 * PCI roots should be skipped per DF/SMN interface so
295		 * the following DF/SMN interfaces get mapped to
296		 * correct PCI roots.
297		 */
298		for (j = 1; j < roots_per_misc; j++)
299			root = next_northbridge(root, root_ids);
300	}
301
302	if (amd_gart_present())
303		amd_northbridges.flags |= AMD_NB_GART;
304
305	/*
306	 * Check for L3 cache presence.
307	 */
308	if (!cpuid_edx(0x80000006))
309		return 0;
310
311	/*
312	 * Some CPU families support L3 Cache Index Disable. There are some
313	 * limitations because of E382 and E388 on family 0x10.
314	 */
315	if (boot_cpu_data.x86 == 0x10 &&
316	    boot_cpu_data.x86_model >= 0x8 &&
317	    (boot_cpu_data.x86_model > 0x9 ||
318	     boot_cpu_data.x86_stepping >= 0x1))
319		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
320
321	if (boot_cpu_data.x86 == 0x15)
322		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
323
324	/* L3 cache partitioning is supported on family 0x15 */
325	if (boot_cpu_data.x86 == 0x15)
326		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
327
328	return 0;
329}
330
331/*
332 * Ignores subdevice/subvendor but as far as I can figure out
 333 * they're useless anyway
334 */
335bool __init early_is_amd_nb(u32 device)
336{
337	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
338	const struct pci_device_id *id;
339	u32 vendor = device & 0xffff;
340
341	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
342	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
343		return false;
344
345	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
346		misc_ids = hygon_nb_misc_ids;
347
348	device >>= 16;
349	for (id = misc_ids; id->vendor; id++)
350		if (vendor == id->vendor && device == id->device)
351			return true;
352	return false;
353}
354
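/*
 * Derive the MMCONFIG aperture from MSR_FAM10H_MMIO_CONF_BASE and fill
 * in @res with its base and size.  Returns NULL if the CPU is not a
 * fam10h-or-later AMD/Hygon part or MMCONFIG is not enabled.
 */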
355struct resource *amd_get_mmconfig_range(struct resource *res)
356{
357	u32 address;
358	u64 base, msr;
359	unsigned int segn_busn_bits;
360
361	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
362	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
363		return NULL;
364
365	/* assume all cpus from fam10h have mmconfig */
366	if (boot_cpu_data.x86 < 0x10)
367		return NULL;
368
369	address = MSR_FAM10H_MMIO_CONF_BASE;
370	rdmsrl(address, msr);
371
372	/* mmconfig is not enabled */
373	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
374		return NULL;
375
376	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
377
378	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
379			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
380
381	res->flags = IORESOURCE_MEM;
382	res->start = base;
383	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
384	return res;
385}
386
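/*
 * Return the 4-bit L3 subcache enable mask for @cpu, read from
 * register 0x1d4 of the node's link device.  Only meaningful when
 * AMD_NB_L3_PARTITIONING is supported.
 */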
387int amd_get_subcaches(int cpu)
388{
389	struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
390	unsigned int mask;
391
392	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
393		return 0;
394
395	pci_read_config_dword(link, 0x1d4, &mask);
396
397	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
398}
399
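/*
 * Update the L3 subcache enable mask for @cpu via the link device.
 * BAN mode is dropped in the misc device while subcaches are being
 * disabled and restored once the partitioning returns to its reset
 * state.
 */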
400int amd_set_subcaches(int cpu, unsigned long mask)
401{
402	static unsigned int reset, ban;
403	struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
404	unsigned int reg;
405	int cuid;
406
407	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
408		return -EINVAL;
409
410	/* if necessary, collect reset state of L3 partitioning and BAN mode */
411	if (reset == 0) {
412		pci_read_config_dword(nb->link, 0x1d4, &reset);
413		pci_read_config_dword(nb->misc, 0x1b8, &ban);
414		ban &= 0x180000;
415	}
416
417	/* deactivate BAN mode if any subcaches are to be disabled */
418	if (mask != 0xf) {
419		pci_read_config_dword(nb->misc, 0x1b8, &reg);
420		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
421	}
422
423	cuid = cpu_data(cpu).topo.core_id;
424	mask <<= 4 * cuid;
425	mask |= (0xf ^ (1 << cuid)) << 26;
426
427	pci_write_config_dword(nb->link, 0x1d4, mask);
428
429	/* reset BAN mode if L3 partitioning returned to reset state */
430	pci_read_config_dword(nb->link, 0x1d4, &reg);
431	if (reg == reset) {
432		pci_read_config_dword(nb->misc, 0x1b8, &reg);
433		reg &= ~0x180000;
434		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
435	}
436
437	return 0;
438}
439
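/*
 * Cache the GART cache-flush control word (misc device register 0x9c)
 * of every northbridge so amd_flush_garts() only has to OR in the
 * flush-request bit later.
 */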
440static void amd_cache_gart(void)
441{
442	u16 i;
443
444	if (!amd_nb_has_feature(AMD_NB_GART))
445		return;
446
447	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
448	if (!flush_words) {
449		amd_northbridges.flags &= ~AMD_NB_GART;
450		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
451		return;
452	}
453
454	for (i = 0; i != amd_northbridges.num; i++)
455		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
456}
457
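/*
 * Flush the GART TLB on all northbridges: set the flush-request bit in
 * each cached control word, write it back to register 0x9c and poll
 * until the hardware clears the bit again.
 */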
458void amd_flush_garts(void)
459{
460	int flushed, i;
461	unsigned long flags;
462	static DEFINE_SPINLOCK(gart_lock);
463
464	if (!amd_nb_has_feature(AMD_NB_GART))
465		return;
466
467	/*
468	 * Avoid races between AGP and IOMMU. In theory it's not needed
469	 * but I'm not sure if the hardware won't lose flush requests
470	 * when another is pending. This whole thing is so expensive anyways
471	 * that it doesn't matter to serialize more. -AK
472	 */
473	spin_lock_irqsave(&gart_lock, flags);
474	flushed = 0;
475	for (i = 0; i < amd_northbridges.num; i++) {
476		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
477				       flush_words[i] | 1);
478		flushed++;
479	}
480	for (i = 0; i < amd_northbridges.num; i++) {
481		u32 w;
 482		/* Make sure the hardware actually executed the flush */
483		for (;;) {
484			pci_read_config_dword(node_to_amd_nb(i)->misc,
485					      0x9c, &w);
486			if (!(w & 1))
487				break;
488			cpu_relax();
489		}
490	}
491	spin_unlock_irqrestore(&gart_lock, flags);
492	if (!flushed)
493		pr_notice("nothing to flush?\n");
494}
495EXPORT_SYMBOL_GPL(amd_flush_garts);
496
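/* Set the erratum 688 workaround bits (3 and 14) in MSR_AMD64_IC_CFG. */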
497static void __fix_erratum_688(void *info)
498{
499#define MSR_AMD64_IC_CFG 0xC0011021
500
501	msr_set_bit(MSR_AMD64_IC_CFG, 3);
502	msr_set_bit(MSR_AMD64_IC_CFG, 14);
503}
504
505/* Apply erratum 688 fix so machines without a BIOS fix work. */
506static __init void fix_erratum_688(void)
507{
508	struct pci_dev *F4;
509	u32 val;
510
511	if (boot_cpu_data.x86 != 0x14)
512		return;
513
514	if (!amd_northbridges.num)
515		return;
516
517	F4 = node_to_amd_nb(0)->link;
518	if (!F4)
519		return;
520
521	if (pci_read_config_dword(F4, 0x164, &val))
522		return;
523
524	if (val & BIT(2))
525		return;
526
527	on_each_cpu(__fix_erratum_688, NULL, 0);
528
529	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
530}
531
532static __init int init_amd_nbs(void)
533{
534	amd_cache_northbridges();
535	amd_cache_gart();
536
537	fix_erratum_688();
538
539	return 0;
540}
541
542/* This has to go after the PCI subsystem */
543fs_initcall(init_amd_nbs);
v3.15
 
  1/*
  2 * Shared support code for AMD K8 northbridges and derivatives.
  3 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
  4 */
  5
  6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  7
  8#include <linux/types.h>
  9#include <linux/slab.h>
 10#include <linux/init.h>
 11#include <linux/errno.h>
 12#include <linux/module.h>
 13#include <linux/spinlock.h>
 14#include <asm/amd_nb.h>
 15
 16static u32 *flush_words;
 17
 18const struct pci_device_id amd_nb_misc_ids[] = {
 19	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 20	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 21	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 22	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
 23	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
 24	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 25	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
 26	{}
 27};
 28EXPORT_SYMBOL(amd_nb_misc_ids);
 29
 30static const struct pci_device_id amd_nb_link_ids[] = {
 31	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
 32	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
 33	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
 34	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
 35	{}
 36};
 37
 38const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
 39	{ 0x00, 0x18, 0x20 },
 40	{ 0xff, 0x00, 0x20 },
 41	{ 0xfe, 0x00, 0x20 },
 42	{ }
 43};
 44
 45struct amd_northbridge_info amd_northbridges;
 46EXPORT_SYMBOL(amd_northbridges);
 47
 48static struct pci_dev *next_northbridge(struct pci_dev *dev,
 49					const struct pci_device_id *ids)
 50{
 51	do {
 52		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
 53		if (!dev)
 54			break;
 55	} while (!pci_match_id(ids, dev));
 56	return dev;
 57}
 58
 59int amd_cache_northbridges(void)
 60{
 61	u16 i = 0;
 62	struct amd_northbridge *nb;
 63	struct pci_dev *misc, *link;
 64
 65	if (amd_nb_num())
 66		return 0;
 67
 68	misc = NULL;
 69	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
 70		i++;
 71
 72	if (i == 0)
 73		return 0;
 74
 75	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
 76	if (!nb)
 77		return -ENOMEM;
 78
 79	amd_northbridges.nb = nb;
 80	amd_northbridges.num = i;
 81
 82	link = misc = NULL;
 83	for (i = 0; i != amd_nb_num(); i++) {
 84		node_to_amd_nb(i)->misc = misc =
 85			next_northbridge(misc, amd_nb_misc_ids);
 86		node_to_amd_nb(i)->link = link =
 87			next_northbridge(link, amd_nb_link_ids);
 88	}
 89
 90	/* GART present only on K8, Fam10h and Fam15h up to model 0fh */
 91	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
 92	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
 93		amd_northbridges.flags |= AMD_NB_GART;
 94
 95	/*
 96	 * Check for L3 cache presence.
 97	 */
 98	if (!cpuid_edx(0x80000006))
 99		return 0;
100
101	/*
102	 * Some CPU families support L3 Cache Index Disable. There are some
103	 * limitations because of E382 and E388 on family 0x10.
104	 */
105	if (boot_cpu_data.x86 == 0x10 &&
106	    boot_cpu_data.x86_model >= 0x8 &&
107	    (boot_cpu_data.x86_model > 0x9 ||
108	     boot_cpu_data.x86_mask >= 0x1))
109		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
110
111	if (boot_cpu_data.x86 == 0x15)
112		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
113
114	/* L3 cache partitioning is supported on family 0x15 */
115	if (boot_cpu_data.x86 == 0x15)
116		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
117
118	return 0;
119}
120EXPORT_SYMBOL_GPL(amd_cache_northbridges);
121
122/*
123 * Ignores subdevice/subvendor but as far as I can figure out
124 * they're useless anyway
125 */
126bool __init early_is_amd_nb(u32 device)
127{
128	const struct pci_device_id *id;
129	u32 vendor = device & 0xffff;
130
131	device >>= 16;
132	for (id = amd_nb_misc_ids; id->vendor; id++)
133		if (vendor == id->vendor && device == id->device)
134			return true;
135	return false;
136}
137
138struct resource *amd_get_mmconfig_range(struct resource *res)
139{
140	u32 address;
141	u64 base, msr;
142	unsigned segn_busn_bits;
143
144	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
145		return NULL;
146
147	/* assume all cpus from fam10h have mmconfig */
148	if (boot_cpu_data.x86 < 0x10)
149		return NULL;
150
151	address = MSR_FAM10H_MMIO_CONF_BASE;
152	rdmsrl(address, msr);
153
154	/* mmconfig is not enabled */
155	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
156		return NULL;
157
158	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
159
160	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
161			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
162
163	res->flags = IORESOURCE_MEM;
164	res->start = base;
165	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
166	return res;
167}
168
169int amd_get_subcaches(int cpu)
170{
171	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
172	unsigned int mask;
173	int cuid;
174
175	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
176		return 0;
177
178	pci_read_config_dword(link, 0x1d4, &mask);
179
180	cuid = cpu_data(cpu).compute_unit_id;
181	return (mask >> (4 * cuid)) & 0xf;
182}
183
184int amd_set_subcaches(int cpu, unsigned long mask)
185{
186	static unsigned int reset, ban;
187	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
188	unsigned int reg;
189	int cuid;
190
191	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
192		return -EINVAL;
193
194	/* if necessary, collect reset state of L3 partitioning and BAN mode */
195	if (reset == 0) {
196		pci_read_config_dword(nb->link, 0x1d4, &reset);
197		pci_read_config_dword(nb->misc, 0x1b8, &ban);
198		ban &= 0x180000;
199	}
200
201	/* deactivate BAN mode if any subcaches are to be disabled */
202	if (mask != 0xf) {
203		pci_read_config_dword(nb->misc, 0x1b8, &reg);
204		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
205	}
206
207	cuid = cpu_data(cpu).compute_unit_id;
208	mask <<= 4 * cuid;
209	mask |= (0xf ^ (1 << cuid)) << 26;
210
211	pci_write_config_dword(nb->link, 0x1d4, mask);
212
213	/* reset BAN mode if L3 partitioning returned to reset state */
214	pci_read_config_dword(nb->link, 0x1d4, &reg);
215	if (reg == reset) {
216		pci_read_config_dword(nb->misc, 0x1b8, &reg);
217		reg &= ~0x180000;
218		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
219	}
220
221	return 0;
222}
223
224static int amd_cache_gart(void)
225{
226	u16 i;
227
228	if (!amd_nb_has_feature(AMD_NB_GART))
229		return 0;
230
231	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
232	if (!flush_words) {
233		amd_northbridges.flags &= ~AMD_NB_GART;
234		return -ENOMEM;
235	}
236
237	for (i = 0; i != amd_nb_num(); i++)
238		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
239				      &flush_words[i]);
240
241	return 0;
242}
243
244void amd_flush_garts(void)
245{
246	int flushed, i;
247	unsigned long flags;
248	static DEFINE_SPINLOCK(gart_lock);
249
250	if (!amd_nb_has_feature(AMD_NB_GART))
251		return;
252
253	/* Avoid races between AGP and IOMMU. In theory it's not needed
254	   but I'm not sure if the hardware won't lose flush requests
255	   when another is pending. This whole thing is so expensive anyways
256	   that it doesn't matter to serialize more. -AK */
257	spin_lock_irqsave(&gart_lock, flags);
258	flushed = 0;
259	for (i = 0; i < amd_nb_num(); i++) {
260		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
261				       flush_words[i] | 1);
262		flushed++;
263	}
264	for (i = 0; i < amd_nb_num(); i++) {
265		u32 w;
266		/* Make sure the hardware actually executed the flush */
267		for (;;) {
268			pci_read_config_dword(node_to_amd_nb(i)->misc,
269					      0x9c, &w);
270			if (!(w & 1))
271				break;
272			cpu_relax();
273		}
274	}
275	spin_unlock_irqrestore(&gart_lock, flags);
276	if (!flushed)
277		pr_notice("nothing to flush?\n");
278}
279EXPORT_SYMBOL_GPL(amd_flush_garts);
280
281static __init int init_amd_nbs(void)
282{
283	int err = 0;
284
285	err = amd_cache_northbridges();
286
287	if (err < 0)
288		pr_notice("Cannot enumerate AMD northbridges\n");
289
290	if (amd_cache_gart() < 0)
291		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
292
293	return err;
294}
295
296/* This has to go after the PCI subsystem */
297fs_initcall(init_amd_nbs);