v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Shared support code for AMD K8 northbridges and derivatives.
  4 * Copyright 2006 Andi Kleen, SUSE Labs.
  5 */
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8
  9#include <linux/types.h>
 10#include <linux/slab.h>
 11#include <linux/init.h>
 12#include <linux/errno.h>
 13#include <linux/export.h>
 14#include <linux/spinlock.h>
 15#include <linux/pci_ids.h>
 16#include <asm/amd_nb.h>
 17
 18#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
 19#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
 20#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
 21#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
 22#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
 23#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
 24#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
 25
 26/* Protect the PCI config register pairs used for SMN and DF indirect access. */
 27static DEFINE_MUTEX(smn_mutex);
 28
 29static u32 *flush_words;
 30
 31static const struct pci_device_id amd_root_ids[] = {
 32	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
 33	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
 34	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
 35	{}
 36};
 37
 38
 39#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704
 40
 41const struct pci_device_id amd_nb_misc_ids[] = {
 42	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 43	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 44	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 45	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
 46	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
 47	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
 48	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 49	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
 50	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
 51	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
 52	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
 53	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 54	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
 55	{}
 56};
 57EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
 58
 59static const struct pci_device_id amd_nb_link_ids[] = {
 60	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
 61	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
 62	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
 63	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
 64	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
 65	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
 66	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
 67	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
 68	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
 69	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
 70	{}
 71};
 72
 73static const struct pci_device_id hygon_root_ids[] = {
 74	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
 75	{}
 76};
 77
 78static const struct pci_device_id hygon_nb_misc_ids[] = {
 79	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
 80	{}
 81};
 82
 83static const struct pci_device_id hygon_nb_link_ids[] = {
 84	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
 85	{}
 86};
 87
 88const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
 89	{ 0x00, 0x18, 0x20 },
 90	{ 0xff, 0x00, 0x20 },
 91	{ 0xfe, 0x00, 0x20 },
 92	{ }
 93};
 94
 95static struct amd_northbridge_info amd_northbridges;
 96
 97u16 amd_nb_num(void)
 98{
 99	return amd_northbridges.num;
100}
101EXPORT_SYMBOL_GPL(amd_nb_num);
102
103bool amd_nb_has_feature(unsigned int feature)
104{
105	return ((amd_northbridges.flags & feature) == feature);
106}
107EXPORT_SYMBOL_GPL(amd_nb_has_feature);
108
109struct amd_northbridge *node_to_amd_nb(int node)
110{
111	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
112}
113EXPORT_SYMBOL_GPL(node_to_amd_nb);
114
115static struct pci_dev *next_northbridge(struct pci_dev *dev,
116					const struct pci_device_id *ids)
117{
118	do {
119		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
120		if (!dev)
121			break;
122	} while (!pci_match_id(ids, dev));
123	return dev;
124}
125
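/*
 * Note (editorial, not upstream text): SMN accesses go through an index/data
 * register pair in the root device's PCI config space.  The SMN address is
 * written to offset 0x60 and the data is then read from or written at offset
 * 0x64; smn_mutex serializes the two-step sequence so concurrent callers
 * cannot interleave their accesses.
 */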
126static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
127{
128	struct pci_dev *root;
129	int err = -ENODEV;
130
131	if (node >= amd_northbridges.num)
132		goto out;
133
134	root = node_to_amd_nb(node)->root;
135	if (!root)
136		goto out;
137
138	mutex_lock(&smn_mutex);
139
140	err = pci_write_config_dword(root, 0x60, address);
141	if (err) {
142		pr_warn("Error programming SMN address 0x%x.\n", address);
143		goto out_unlock;
144	}
145
146	err = (write ? pci_write_config_dword(root, 0x64, *value)
147		     : pci_read_config_dword(root, 0x64, value));
148	if (err)
149		pr_warn("Error %s SMN address 0x%x.\n",
150			(write ? "writing to" : "reading from"), address);
151
152out_unlock:
153	mutex_unlock(&smn_mutex);
154
155out:
156	return err;
157}
158
159int amd_smn_read(u16 node, u32 address, u32 *value)
160{
161	return __amd_smn_rw(node, address, value, false);
162}
163EXPORT_SYMBOL_GPL(amd_smn_read);
164
165int amd_smn_write(u16 node, u32 address, u32 value)
166{
167	return __amd_smn_rw(node, address, &value, true);
168}
169EXPORT_SYMBOL_GPL(amd_smn_write);
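
/*
 * Usage sketch (editorial, not part of the upstream file).  A caller that
 * already knows its node id and an SMN register address would use the two
 * helpers like this; SMN_REG_ADDR and SOME_BIT are made-up placeholders:
 *
 *	u32 val;
 *
 *	if (amd_smn_read(node, SMN_REG_ADDR, &val))
 *		return -ENODEV;
 *	val |= SOME_BIT;
 *	if (amd_smn_write(node, SMN_REG_ADDR, val))
 *		return -ENODEV;
 */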
170
171/*
172 * Data Fabric Indirect Access uses FICAA/FICAD.
173 *
174 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
175 * on the device's Instance Id and the PCI function and register offset of
176 * the desired register.
177 *
178 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
179 * and FICAD HI registers but so far we only need the LO register.
180 */
181int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
182{
183	struct pci_dev *F4;
184	u32 ficaa;
185	int err = -ENODEV;
186
187	if (node >= amd_northbridges.num)
188		goto out;
189
190	F4 = node_to_amd_nb(node)->link;
191	if (!F4)
192		goto out;
193
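	/*
	 * FICAA layout as constructed below (editorial note): bit 0 selects
	 * instance-id based access, bits [9:2] carry the dword-aligned
	 * register offset, bits [13:11] the function, and bits [23:16] the
	 * instance id.
	 */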
194	ficaa  = 1;
195	ficaa |= reg & 0x3FC;
196	ficaa |= (func & 0x7) << 11;
197	ficaa |= instance_id << 16;
198
199	mutex_lock(&smn_mutex);
200
201	err = pci_write_config_dword(F4, 0x5C, ficaa);
202	if (err) {
203		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
204		goto out_unlock;
205	}
206
207	err = pci_read_config_dword(F4, 0x98, lo);
208	if (err)
209		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
210
211out_unlock:
212	mutex_unlock(&smn_mutex);
213
214out:
215	return err;
216}
217EXPORT_SYMBOL_GPL(amd_df_indirect_read);
218
219int amd_cache_northbridges(void)
220{
221	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
222	const struct pci_device_id *link_ids = amd_nb_link_ids;
223	const struct pci_device_id *root_ids = amd_root_ids;
224	struct pci_dev *root, *misc, *link;
225	struct amd_northbridge *nb;
226	u16 roots_per_misc = 0;
227	u16 misc_count = 0;
228	u16 root_count = 0;
229	u16 i, j;
230
231	if (amd_northbridges.num)
232		return 0;
233
234	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
235		root_ids = hygon_root_ids;
236		misc_ids = hygon_nb_misc_ids;
237		link_ids = hygon_nb_link_ids;
238	}
239
240	misc = NULL;
241	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
242		misc_count++;
243
244	if (!misc_count)
245		return -ENODEV;
246
247	root = NULL;
248	while ((root = next_northbridge(root, root_ids)) != NULL)
249		root_count++;
250
251	if (root_count) {
252		roots_per_misc = root_count / misc_count;
253
254		/*
255		 * There should be _exactly_ N roots for each DF/SMN
256		 * interface.
257		 */
258		if (!roots_per_misc || (root_count % roots_per_misc)) {
259			pr_info("Unsupported AMD DF/PCI configuration found\n");
260			return -ENODEV;
261		}
262	}
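	/*
	 * Editorial example: 8 PCI roots with 4 DF/SMN devices gives
	 * roots_per_misc == 2, so the enumeration loop further down skips
	 * one redundant root per DF/SMN interface.
	 */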
263
264	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
265	if (!nb)
266		return -ENOMEM;
267
268	amd_northbridges.nb = nb;
269	amd_northbridges.num = misc_count;
270
271	link = misc = root = NULL;
272	for (i = 0; i < amd_northbridges.num; i++) {
273		node_to_amd_nb(i)->root = root =
274			next_northbridge(root, root_ids);
275		node_to_amd_nb(i)->misc = misc =
276			next_northbridge(misc, misc_ids);
277		node_to_amd_nb(i)->link = link =
278			next_northbridge(link, link_ids);
279
280		/*
281		 * If there are more PCI root devices than data fabric/
282		 * system management network interfaces, then the (N)
283		 * PCI roots per DF/SMN interface are functionally the
284		 * same (for DF/SMN access) and N-1 are redundant.  N-1
285		 * PCI roots should be skipped per DF/SMN interface so
286		 * the following DF/SMN interfaces get mapped to
287		 * correct PCI roots.
288		 */
289		for (j = 1; j < roots_per_misc; j++)
290			root = next_northbridge(root, root_ids);
291	}
292
293	if (amd_gart_present())
294		amd_northbridges.flags |= AMD_NB_GART;
295
296	/*
297	 * Check for L3 cache presence.
298	 */
299	if (!cpuid_edx(0x80000006))
300		return 0;
301
302	/*
303	 * Some CPU families support L3 Cache Index Disable. There are some
304	 * limitations because of E382 and E388 on family 0x10.
305	 */
306	if (boot_cpu_data.x86 == 0x10 &&
307	    boot_cpu_data.x86_model >= 0x8 &&
308	    (boot_cpu_data.x86_model > 0x9 ||
309	     boot_cpu_data.x86_stepping >= 0x1))
310		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
311
312	if (boot_cpu_data.x86 == 0x15)
313		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
314
315	/* L3 cache partitioning is supported on family 0x15 */
316	if (boot_cpu_data.x86 == 0x15)
317		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
318
319	return 0;
320}
321EXPORT_SYMBOL_GPL(amd_cache_northbridges);
322
323/*
324 * Ignores subdevice/subvendor but as far as I can figure out
325 * they're useless anyways
326 */
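/*
 * Editorial note: @device packs the PCI vendor id in its low 16 bits and the
 * device id in its high 16 bits, matching the layout of config-space dword 0.
 */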
327bool __init early_is_amd_nb(u32 device)
328{
329	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
330	const struct pci_device_id *id;
331	u32 vendor = device & 0xffff;
332
333	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
334	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
335		return false;
336
337	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
338		misc_ids = hygon_nb_misc_ids;
339
340	device >>= 16;
341	for (id = misc_ids; id->vendor; id++)
342		if (vendor == id->vendor && device == id->device)
343			return true;
344	return false;
345}
346
347struct resource *amd_get_mmconfig_range(struct resource *res)
348{
349	u32 address;
350	u64 base, msr;
351	unsigned int segn_busn_bits;
352
353	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
354	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
355		return NULL;
356
357	/* assume all cpus from fam10h have mmconfig */
358	if (boot_cpu_data.x86 < 0x10)
359		return NULL;
360
361	address = MSR_FAM10H_MMIO_CONF_BASE;
362	rdmsrl(address, msr);
363
364	/* mmconfig is not enabled */
365	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
366		return NULL;
367
368	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
369
370	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
371			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
372
373	res->flags = IORESOURCE_MEM;
374	res->start = base;
375	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
376	return res;
377}
378
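/*
 * Editorial note: on family 0x15 (the only family that sets
 * AMD_NB_L3_PARTITIONING above), register 0x1d4 of the link (function 4)
 * device holds one 4-bit subcache-enable mask per compute unit, which is
 * what the shift by 4 * cpu_core_id below extracts and rewrites.
 */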
379int amd_get_subcaches(int cpu)
380{
381	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
382	unsigned int mask;
383
384	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
385		return 0;
386
387	pci_read_config_dword(link, 0x1d4, &mask);
388
389	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
390}
391
392int amd_set_subcaches(int cpu, unsigned long mask)
393{
394	static unsigned int reset, ban;
395	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
396	unsigned int reg;
397	int cuid;
398
399	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
400		return -EINVAL;
401
402	/* if necessary, collect reset state of L3 partitioning and BAN mode */
403	if (reset == 0) {
404		pci_read_config_dword(nb->link, 0x1d4, &reset);
405		pci_read_config_dword(nb->misc, 0x1b8, &ban);
406		ban &= 0x180000;
407	}
408
409	/* deactivate BAN mode if any subcaches are to be disabled */
410	if (mask != 0xf) {
411		pci_read_config_dword(nb->misc, 0x1b8, &reg);
412		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
413	}
414
415	cuid = cpu_data(cpu).cpu_core_id;
416	mask <<= 4 * cuid;
417	mask |= (0xf ^ (1 << cuid)) << 26;
418
419	pci_write_config_dword(nb->link, 0x1d4, mask);
420
421	/* reset BAN mode if L3 partitioning returned to reset state */
422	pci_read_config_dword(nb->link, 0x1d4, &reg);
423	if (reg == reset) {
424		pci_read_config_dword(nb->misc, 0x1b8, &reg);
425		reg &= ~0x180000;
426		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
427	}
428
429	return 0;
430}
431
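/*
 * Editorial note: amd_cache_gart() snapshots register 0x9c of every node's
 * misc (function 3) device; amd_flush_garts() later writes that value back
 * with bit 0 set to start a GART TLB flush and polls until the hardware
 * clears the bit again.
 */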
432static void amd_cache_gart(void)
433{
434	u16 i;
435
436	if (!amd_nb_has_feature(AMD_NB_GART))
437		return;
438
439	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
440	if (!flush_words) {
441		amd_northbridges.flags &= ~AMD_NB_GART;
442		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
443		return;
444	}
445
446	for (i = 0; i != amd_northbridges.num; i++)
447		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
448}
449
450void amd_flush_garts(void)
451{
452	int flushed, i;
453	unsigned long flags;
454	static DEFINE_SPINLOCK(gart_lock);
455
456	if (!amd_nb_has_feature(AMD_NB_GART))
457		return;
458
459	/*
460	 * Avoid races between AGP and IOMMU. In theory it's not needed
461	 * but I'm not sure if the hardware won't lose flush requests
462	 * when another is pending. This whole thing is so expensive anyways
463	 * that it doesn't matter to serialize more. -AK
464	 */
465	spin_lock_irqsave(&gart_lock, flags);
466	flushed = 0;
467	for (i = 0; i < amd_northbridges.num; i++) {
468		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
469				       flush_words[i] | 1);
470		flushed++;
471	}
472	for (i = 0; i < amd_northbridges.num; i++) {
473		u32 w;
 474		/* Make sure the hardware actually executed the flush */
475		for (;;) {
476			pci_read_config_dword(node_to_amd_nb(i)->misc,
477					      0x9c, &w);
478			if (!(w & 1))
479				break;
480			cpu_relax();
481		}
482	}
483	spin_unlock_irqrestore(&gart_lock, flags);
484	if (!flushed)
485		pr_notice("nothing to flush?\n");
486}
487EXPORT_SYMBOL_GPL(amd_flush_garts);
488
489static void __fix_erratum_688(void *info)
490{
491#define MSR_AMD64_IC_CFG 0xC0011021
492
493	msr_set_bit(MSR_AMD64_IC_CFG, 3);
494	msr_set_bit(MSR_AMD64_IC_CFG, 14);
495}
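
/*
 * Editorial note: the worker above sets bits 3 and 14 of MSR C001_1021
 * (MSR_AMD64_IC_CFG) on each CPU.  fix_erratum_688() below schedules it only
 * on family 0x14 parts, and only when bit 2 of F4 config register 0x164 is
 * clear, which is the check the code uses to decide the workaround is still
 * needed.
 */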
496
497/* Apply erratum 688 fix so machines without a BIOS fix work. */
498static __init void fix_erratum_688(void)
499{
500	struct pci_dev *F4;
501	u32 val;
502
503	if (boot_cpu_data.x86 != 0x14)
504		return;
505
506	if (!amd_northbridges.num)
507		return;
508
509	F4 = node_to_amd_nb(0)->link;
510	if (!F4)
511		return;
512
513	if (pci_read_config_dword(F4, 0x164, &val))
514		return;
515
516	if (val & BIT(2))
517		return;
518
519	on_each_cpu(__fix_erratum_688, NULL, 0);
520
521	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
522}
523
524static __init int init_amd_nbs(void)
525{
526	amd_cache_northbridges();
527	amd_cache_gart();
528
529	fix_erratum_688();
530
531	return 0;
532}
533
534/* This has to go after the PCI subsystem */
535fs_initcall(init_amd_nbs);
v4.6
 
  1/*
  2 * Shared support code for AMD K8 northbridges and derivatives.
  3 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
  4 */
  5
  6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  7
  8#include <linux/types.h>
  9#include <linux/slab.h>
 10#include <linux/init.h>
 11#include <linux/errno.h>
 12#include <linux/module.h>
 13#include <linux/spinlock.h>
 14#include <asm/amd_nb.h>
 15
 16static u32 *flush_words;
 17
 18const struct pci_device_id amd_nb_misc_ids[] = {
 19	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 20	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 21	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 22	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
 23	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
 24	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
 25	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 26	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
 27	{}
 28};
 29EXPORT_SYMBOL(amd_nb_misc_ids);
 30
 31static const struct pci_device_id amd_nb_link_ids[] = {
 32	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
 33	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
 34	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
 35	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
 36	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
 37	{}
 38};
 39
 40const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
 41	{ 0x00, 0x18, 0x20 },
 42	{ 0xff, 0x00, 0x20 },
 43	{ 0xfe, 0x00, 0x20 },
 44	{ }
 45};
 46
 47struct amd_northbridge_info amd_northbridges;
 48EXPORT_SYMBOL(amd_northbridges);
 49
 50static struct pci_dev *next_northbridge(struct pci_dev *dev,
 51					const struct pci_device_id *ids)
 52{
 53	do {
 54		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
 55		if (!dev)
 56			break;
 57	} while (!pci_match_id(ids, dev));
 58	return dev;
 59}
 60
 61int amd_cache_northbridges(void)
 62{
 63	u16 i = 0;
 64	struct amd_northbridge *nb;
 65	struct pci_dev *misc, *link;
 66
 67	if (amd_nb_num())
 68		return 0;
 69
 70	misc = NULL;
 71	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
 72		i++;
 73
 74	if (i == 0)
 75		return 0;
 76
 77	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
 78	if (!nb)
 79		return -ENOMEM;
 80
 81	amd_northbridges.nb = nb;
 82	amd_northbridges.num = i;
 83
 84	link = misc = NULL;
 85	for (i = 0; i != amd_nb_num(); i++) {
 86		node_to_amd_nb(i)->misc = misc =
 87			next_northbridge(misc, amd_nb_misc_ids);
 88		node_to_amd_nb(i)->link = link =
 89			next_northbridge(link, amd_nb_link_ids);
 90	}
 91
 92	if (amd_gart_present())
 93		amd_northbridges.flags |= AMD_NB_GART;
 94
 95	/*
 96	 * Check for L3 cache presence.
 97	 */
 98	if (!cpuid_edx(0x80000006))
 99		return 0;
100
101	/*
102	 * Some CPU families support L3 Cache Index Disable. There are some
103	 * limitations because of E382 and E388 on family 0x10.
104	 */
105	if (boot_cpu_data.x86 == 0x10 &&
106	    boot_cpu_data.x86_model >= 0x8 &&
107	    (boot_cpu_data.x86_model > 0x9 ||
108	     boot_cpu_data.x86_mask >= 0x1))
109		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
110
111	if (boot_cpu_data.x86 == 0x15)
112		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
113
114	/* L3 cache partitioning is supported on family 0x15 */
115	if (boot_cpu_data.x86 == 0x15)
116		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
117
118	return 0;
119}
120EXPORT_SYMBOL_GPL(amd_cache_northbridges);
121
122/*
123 * Ignores subdevice/subvendor but as far as I can figure out
124 * they're useless anyways
125 */
126bool __init early_is_amd_nb(u32 device)
127{
128	const struct pci_device_id *id;
129	u32 vendor = device & 0xffff;
130
131	device >>= 16;
132	for (id = amd_nb_misc_ids; id->vendor; id++)
133		if (vendor == id->vendor && device == id->device)
134			return true;
135	return false;
136}
137
138struct resource *amd_get_mmconfig_range(struct resource *res)
139{
140	u32 address;
141	u64 base, msr;
142	unsigned segn_busn_bits;
143
144	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
145		return NULL;
146
147	/* assume all cpus from fam10h have mmconfig */
148	if (boot_cpu_data.x86 < 0x10)
149		return NULL;
150
151	address = MSR_FAM10H_MMIO_CONF_BASE;
152	rdmsrl(address, msr);
153
154	/* mmconfig is not enabled */
155	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
156		return NULL;
157
158	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
159
160	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
161			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
162
163	res->flags = IORESOURCE_MEM;
164	res->start = base;
165	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
166	return res;
167}
168
169int amd_get_subcaches(int cpu)
170{
171	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
172	unsigned int mask;
173
174	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
175		return 0;
176
177	pci_read_config_dword(link, 0x1d4, &mask);
178
179	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
180}
181
182int amd_set_subcaches(int cpu, unsigned long mask)
183{
184	static unsigned int reset, ban;
185	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
186	unsigned int reg;
187	int cuid;
188
189	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
190		return -EINVAL;
191
192	/* if necessary, collect reset state of L3 partitioning and BAN mode */
193	if (reset == 0) {
194		pci_read_config_dword(nb->link, 0x1d4, &reset);
195		pci_read_config_dword(nb->misc, 0x1b8, &ban);
196		ban &= 0x180000;
197	}
198
199	/* deactivate BAN mode if any subcaches are to be disabled */
200	if (mask != 0xf) {
201		pci_read_config_dword(nb->misc, 0x1b8, &reg);
202		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
203	}
204
205	cuid = cpu_data(cpu).cpu_core_id;
206	mask <<= 4 * cuid;
207	mask |= (0xf ^ (1 << cuid)) << 26;
208
209	pci_write_config_dword(nb->link, 0x1d4, mask);
210
211	/* reset BAN mode if L3 partitioning returned to reset state */
212	pci_read_config_dword(nb->link, 0x1d4, &reg);
213	if (reg == reset) {
214		pci_read_config_dword(nb->misc, 0x1b8, &reg);
215		reg &= ~0x180000;
216		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
217	}
218
219	return 0;
220}
221
222static int amd_cache_gart(void)
223{
224	u16 i;
225
226	if (!amd_nb_has_feature(AMD_NB_GART))
227		return 0;
228
229	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
230	if (!flush_words) {
231		amd_northbridges.flags &= ~AMD_NB_GART;
232		return -ENOMEM;
233	}
234
235	for (i = 0; i != amd_nb_num(); i++)
236		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
237				      &flush_words[i]);
238
239	return 0;
240}
241
242void amd_flush_garts(void)
243{
244	int flushed, i;
245	unsigned long flags;
246	static DEFINE_SPINLOCK(gart_lock);
247
248	if (!amd_nb_has_feature(AMD_NB_GART))
249		return;
250
251	/* Avoid races between AGP and IOMMU. In theory it's not needed
252	   but I'm not sure if the hardware won't lose flush requests
253	   when another is pending. This whole thing is so expensive anyways
254	   that it doesn't matter to serialize more. -AK */
255	spin_lock_irqsave(&gart_lock, flags);
256	flushed = 0;
257	for (i = 0; i < amd_nb_num(); i++) {
258		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
259				       flush_words[i] | 1);
260		flushed++;
261	}
262	for (i = 0; i < amd_nb_num(); i++) {
263		u32 w;
264		/* Make sure the hardware actually executed the flush */
265		for (;;) {
266			pci_read_config_dword(node_to_amd_nb(i)->misc,
267					      0x9c, &w);
268			if (!(w & 1))
269				break;
270			cpu_relax();
271		}
272	}
273	spin_unlock_irqrestore(&gart_lock, flags);
274	if (!flushed)
275		pr_notice("nothing to flush?\n");
276}
277EXPORT_SYMBOL_GPL(amd_flush_garts);
278
279static __init int init_amd_nbs(void)
280{
281	int err = 0;
282
283	err = amd_cache_northbridges();
284
285	if (err < 0)
286		pr_notice("Cannot enumerate AMD northbridges\n");
287
288	if (amd_cache_gart() < 0)
289		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
290
291	return err;
292}
293
294/* This has to go after the PCI subsystem */
295fs_initcall(init_amd_nbs);