v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT		0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT		0x14a4
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT		0x1122
#define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb
#define PCI_DEVICE_ID_AMD_MI300_ROOT		0x14f8

#define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4	0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4	0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4	0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4		0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4	0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4	0x12c4
#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F4	0x16fc
#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4	0x124c
#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4	0x12bc
#define PCI_DEVICE_ID_AMD_MI200_DF_F4		0x14d4
#define PCI_DEVICE_ID_AMD_MI300_DF_F4		0x152c

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

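/*
 * Each entry below is { bus, dev_base, dev_limit }: PCI devices from
 * dev_base up to (but not including) dev_limit on the given bus are
 * northbridge devices. Device 0x18 on bus 0 is the traditional K8
 * config-space location, one device per node.
 */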
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

/*
 * SMN accesses may fail in ways that are difficult to detect here in the called
 * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
 * their own checking based on what behavior they expect.
 *
 * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
 * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
 * can be checked here, and a proper error code can be returned.
 *
 * But the Read-as-Zero response cannot be verified here. A value of 0 may be
 * correct in some cases, so callers must check that it is correct for the
 * register/fields they need.
 *
 * For SMN writes, success can be determined through a "write and read back".
 * However, this is not robust when done here.
 *
 * Possible issues:
 *
 * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
 *    *not* match the write value.
 *
 * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
 *    known here.
 *
 * 3) Bits that are "Reserved / Set to 1". Ditto above.
 *
 * Callers of amd_smn_write() should do the "write and read back" check
 * themselves, if needed.
 *
 * For #1, they can see if their target bits got cleared.
 *
 * For #2 and #3, they can check if their target bits got set as intended.
 *
 * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
 * the operation is considered a success, and the caller does their own
 * checking.
 */
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
{
	int err = __amd_smn_rw(node, address, value, false);

	if (PCI_POSSIBLE_ERROR(*value)) {
		err = -ENODEV;
		*value = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int __must_check amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
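
/*
 * Illustrative sketch (not part of the original file): a caller applying
 * the "write and read back" check described in the comment above. The
 * helper name and the target bit are hypothetical; compiled out via #if 0.
 */
#if 0
static int smn_set_bit_checked(u16 node, u32 address, u32 bit)
{
	u32 val;
	int err;

	err = amd_smn_read(node, address, &val);
	if (err)
		return err;

	err = amd_smn_write(node, address, val | BIT(bit));
	if (err)
		return err;

	/* Read back and verify the target bit got set as intended. */
	err = amd_smn_read(node, address, &val);
	if (err)
		return err;

	return (val & BIT(bit)) ? 0 : -EIO;
}
#endif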

static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
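
/*
 * Worked example (illustrative): a system exposing eight PCI roots and
 * four DF/SMN interfaces gives roots_per_misc == 2, so the pairing loop
 * above skips every other root when mapping roots to interfaces.
 */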

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
	if (boot_cpu_data.x86 < 0x10 ||
	    rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
		return NULL;

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}
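
/*
 * Worked example (illustrative): with segn_busn_bits == 8 (256 buses, no
 * extra segment bits), the window above spans 1ULL << (8 + 20) bytes ==
 * 256 MB, i.e. the standard 1 MB of extended config space per bus.
 */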

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
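
/*
 * Worked example (illustrative): for cuid == 1 and mask == 0x3, the value
 * written to register 0x1d4 above is (0x3 << 4) | ((0xf ^ (1 << 1)) << 26)
 * == 0x34000030.
 */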

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return 0;

	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);

v3.1
 
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);