v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

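/*
 * Illustrative sketch (not part of the original file): the accessors
 * above are typically used together to walk every cached northbridge:
 *
 *	u16 i;
 *
 *	for (i = 0; i < amd_nb_num(); i++) {
 *		struct amd_northbridge *nb = node_to_amd_nb(i);
 *		... use nb->root, nb->misc, nb->link ...
 *	}
 */

/*
 * Return the next PCI device after @dev that matches @ids, scanning the
 * whole device list; pass NULL to start from the head. pci_get_device()
 * drops the reference on the device passed in and holds one on the
 * device returned.
 */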
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
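
/*
 * Illustrative use (a sketch, not from this file): a caller such as an
 * EDAC or hwmon driver could read an SMN register on node 0, where
 * SMN_REG_ADDR stands in for a real register address:
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, SMN_REG_ADDR, &val))
 *		pr_debug("SMN 0x%x = 0x%x\n", SMN_REG_ADDR, val);
 */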

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);
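
/*
 * Worked example of the FICAA encoding above (illustrative values):
 * func 0x6, reg 0x110, instance_id 2 gives
 *
 *	ficaa = 1 | (0x110 & 0x3FC) | ((0x6 & 0x7) << 11) | (2 << 16)
 *	      = 0x23111
 *
 * Bit 0 selects register access, bits 2-9 carry the register offset,
 * bits 11-13 the function, and bits 16-23 the Instance Id.
 */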

int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
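
/*
 * Illustrative arithmetic for the root/misc pairing above: a system
 * exposing 8 DF/SMN interfaces and 16 PCI roots gives roots_per_misc = 2,
 * so one redundant root is skipped after each interface is mapped.
 */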

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway. @device packs the PCI device ID in its
 * upper 16 bits and the vendor ID in its lower 16 bits.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}
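
/*
 * Illustrative sizing: segn_busn_bits == 8 (256 buses) yields an MMCONFIG
 * window of 1ULL << (8 + 20) = 256 MB at @base, i.e. 1 MB of config space
 * per bus (32 devices * 8 functions * 4 KB).
 */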

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}
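
/*
 * Illustrative decode: with a register value of 0xf3 and cpu_core_id 1,
 * the result is (0xf3 >> 4) & 0xf = 0xf, i.e. all four subcaches are
 * enabled for that compute unit.
 */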

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
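
/*
 * Illustrative call (sketch): amd_set_subcaches(cpu, 0x3) restricts the
 * CPU's compute unit to subcaches 0 and 1; calling it again with 0xf
 * re-enables all four and lets the saved BAN mode be restored.
 */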

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}
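
/*
 * The flush words cached above mirror register 0x9c of each misc device;
 * amd_flush_garts() writes them back with bit 0 set to trigger a GART TLB
 * flush, then polls until the hardware clears that bit.
 */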

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
v3.1
 
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway. @device packs the PCI device ID in its
 * upper 16 bits and the vendor ID in its lower 16 bits.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyway
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);