v4.17
  1/*
  2 * Shared support code for AMD K8 northbridges and derivatives.
  3 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
  4 */
  5
  6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  7
  8#include <linux/types.h>
  9#include <linux/slab.h>
 10#include <linux/init.h>
 11#include <linux/errno.h>
 12#include <linux/export.h>
 13#include <linux/spinlock.h>
 14#include <asm/amd_nb.h>
 15
 16#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
 17#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
 18#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
 19#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
 20#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
 21#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
 22
 23/* Protect the PCI config register pairs used for SMN and DF indirect access. */
 24static DEFINE_MUTEX(smn_mutex);
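/*
 * As used below, the SMN pair is root complex config registers 0x60
 * (address) and 0x64 (data), and the DF indirect pair is FICAA at
 * offset 0x5C and FICAD LO at 0x98 on the DF function 4 device; both
 * pairs are serialized by smn_mutex.
 */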
 25
 26static u32 *flush_words;
 27
 28static const struct pci_device_id amd_root_ids[] = {
 29	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
 30	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
 31	{}
 32};
 33
 34#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704
 35
 36const struct pci_device_id amd_nb_misc_ids[] = {
 37	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 38	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 39	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 40	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
 41	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
 42	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
 43	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 44	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
 45	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
 46	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
 47	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 48	{}
 49};
 50EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
 51
 52static const struct pci_device_id amd_nb_link_ids[] = {
 53	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
 54	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
 55	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
 56	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
 57	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
 58	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
 59	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
 60	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
 61	{}
 62};
 63
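/*
 * Each entry below is { bus, dev_base, dev_limit } (see struct
 * amd_nb_bus_dev_range in <asm/amd_nb.h>), describing the bus/device-slot
 * ranges in which northbridge functions can appear.
 */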
 64const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
 65	{ 0x00, 0x18, 0x20 },
 66	{ 0xff, 0x00, 0x20 },
 67	{ 0xfe, 0x00, 0x20 },
 68	{ }
 69};
 70
 71static struct amd_northbridge_info amd_northbridges;
 72
 73u16 amd_nb_num(void)
 74{
 75	return amd_northbridges.num;
 76}
 77EXPORT_SYMBOL_GPL(amd_nb_num);
 78
 79bool amd_nb_has_feature(unsigned int feature)
 80{
 81	return ((amd_northbridges.flags & feature) == feature);
 82}
 83EXPORT_SYMBOL_GPL(amd_nb_has_feature);
 84
 85struct amd_northbridge *node_to_amd_nb(int node)
 86{
 87	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
 88}
 89EXPORT_SYMBOL_GPL(node_to_amd_nb);
 90
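/*
 * Return the next PCI device after @dev that matches @ids.
 * pci_get_device() drops the reference on the previous device and
 * returns the next one with a reference held.
 */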
 91static struct pci_dev *next_northbridge(struct pci_dev *dev,
 92					const struct pci_device_id *ids)
 93{
 94	do {
 95		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
 96		if (!dev)
 97			break;
 98	} while (!pci_match_id(ids, dev));
 99	return dev;
100}
101
102static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
103{
104	struct pci_dev *root;
105	int err = -ENODEV;
106
107	if (node >= amd_northbridges.num)
108		goto out;
109
110	root = node_to_amd_nb(node)->root;
111	if (!root)
112		goto out;
113
114	mutex_lock(&smn_mutex);
115
116	err = pci_write_config_dword(root, 0x60, address);
117	if (err) {
118		pr_warn("Error programming SMN address 0x%x.\n", address);
119		goto out_unlock;
120	}
121
122	err = (write ? pci_write_config_dword(root, 0x64, *value)
123		     : pci_read_config_dword(root, 0x64, value));
124	if (err)
125		pr_warn("Error %s SMN address 0x%x.\n",
126			(write ? "writing to" : "reading from"), address);
127
128out_unlock:
129	mutex_unlock(&smn_mutex);
130
131out:
132	return err;
133}
134
135int amd_smn_read(u16 node, u32 address, u32 *value)
136{
137	return __amd_smn_rw(node, address, value, false);
138}
139EXPORT_SYMBOL_GPL(amd_smn_read);
140
141int amd_smn_write(u16 node, u32 address, u32 value)
142{
143	return __amd_smn_rw(node, address, &value, true);
144}
145EXPORT_SYMBOL_GPL(amd_smn_write);
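/*
 * Hedged usage sketch (not part of the original file): callers such as
 * hwmon or EDAC drivers read SoC registers through the SMN roughly like
 * this.  SMN_REG_ADDR and the node number are illustrative placeholders,
 * not real register definitions.
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, SMN_REG_ADDR, &val))
 *		pr_info("SMN register: 0x%x\n", val);
 */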
146
147/*
148 * Data Fabric Indirect Access uses FICAA/FICAD.
149 *
150 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
151 * on the device's Instance Id and the PCI function and register offset of
152 * the desired register.
153 *
154 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
155 * and FICAD HI registers but so far we only need the LO register.
156 */
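/*
 * As constructed below, FICAA bit 0 selects instance-based access,
 * bits [9:2] carry the (dword-aligned) register offset, bits [13:11]
 * the PCI function, and bits [23:16] the instance id.
 */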
157int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
158{
159	struct pci_dev *F4;
160	u32 ficaa;
161	int err = -ENODEV;
162
163	if (node >= amd_northbridges.num)
164		goto out;
165
166	F4 = node_to_amd_nb(node)->link;
167	if (!F4)
168		goto out;
169
170	ficaa  = 1;
171	ficaa |= reg & 0x3FC;
172	ficaa |= (func & 0x7) << 11;
173	ficaa |= instance_id << 16;
174
175	mutex_lock(&smn_mutex);
176
177	err = pci_write_config_dword(F4, 0x5C, ficaa);
178	if (err) {
179		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
180		goto out_unlock;
181	}
182
183	err = pci_read_config_dword(F4, 0x98, lo);
184	if (err)
185		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
186
187out_unlock:
188	mutex_unlock(&smn_mutex);
189
190out:
191	return err;
192}
193EXPORT_SYMBOL_GPL(amd_df_indirect_read);
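/*
 * Hedged usage sketch (not part of the original file): RAS code can use
 * amd_df_indirect_read() to read a Data Fabric register for a single
 * fabric instance.  The func/reg/instance values are placeholders.
 *
 *	u32 lo;
 *
 *	if (!amd_df_indirect_read(node, func, reg, instance_id, &lo))
 *		...decode "lo"...
 */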
194
195int amd_cache_northbridges(void)
196{
197	u16 i = 0;
198	struct amd_northbridge *nb;
199	struct pci_dev *root, *misc, *link;
200
201	if (amd_northbridges.num)
202		return 0;
203
204	misc = NULL;
205	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
206		i++;
207
208	if (!i)
209		return -ENODEV;
210
211	nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
212	if (!nb)
213		return -ENOMEM;
214
215	amd_northbridges.nb = nb;
216	amd_northbridges.num = i;
217
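	/*
	 * The root, misc and link functions are assumed to be enumerated
	 * in the same order, so the i-th match from each ID table is
	 * assigned to node i.
	 */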
218	link = misc = root = NULL;
219	for (i = 0; i != amd_northbridges.num; i++) {
220		node_to_amd_nb(i)->root = root =
221			next_northbridge(root, amd_root_ids);
222		node_to_amd_nb(i)->misc = misc =
223			next_northbridge(misc, amd_nb_misc_ids);
224		node_to_amd_nb(i)->link = link =
225			next_northbridge(link, amd_nb_link_ids);
226	}
227
228	if (amd_gart_present())
229		amd_northbridges.flags |= AMD_NB_GART;
230
231	/*
232	 * Check for L3 cache presence.
233	 */
234	if (!cpuid_edx(0x80000006))
235		return 0;
236
237	/*
238	 * Some CPU families support L3 Cache Index Disable. There are some
239	 * limitations because of E382 and E388 on family 0x10.
240	 */
241	if (boot_cpu_data.x86 == 0x10 &&
242	    boot_cpu_data.x86_model >= 0x8 &&
243	    (boot_cpu_data.x86_model > 0x9 ||
244	     boot_cpu_data.x86_stepping >= 0x1))
245		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
246
247	if (boot_cpu_data.x86 == 0x15)
248		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
249
250	/* L3 cache partitioning is supported on family 0x15 */
251	if (boot_cpu_data.x86 == 0x15)
252		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
253
254	return 0;
255}
256EXPORT_SYMBOL_GPL(amd_cache_northbridges);
257
258/*
259 * Ignores subdevice/subvendor but as far as I can figure out
260 * they're useless anyways
261 */
262bool __init early_is_amd_nb(u32 device)
263{
264	const struct pci_device_id *id;
265	u32 vendor = device & 0xffff;
266
267	device >>= 16;
268	for (id = amd_nb_misc_ids; id->vendor; id++)
269		if (vendor == id->vendor && device == id->device)
270			return true;
271	return false;
272}
273
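/*
 * The MMCONFIG window starts at the base encoded in
 * MSR_FAM10H_MMIO_CONF_BASE and spans 1 MiB of config space per bus,
 * so its total size is (1 << segn_busn_bits) MiB.
 */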
274struct resource *amd_get_mmconfig_range(struct resource *res)
275{
276	u32 address;
277	u64 base, msr;
278	unsigned int segn_busn_bits;
279
280	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
281		return NULL;
282
283	/* assume all cpus from fam10h have mmconfig */
284	if (boot_cpu_data.x86 < 0x10)
285		return NULL;
286
287	address = MSR_FAM10H_MMIO_CONF_BASE;
288	rdmsrl(address, msr);
289
290	/* mmconfig is not enabled */
291	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
292		return NULL;
293
294	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
295
296	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
297			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
298
299	res->flags = IORESOURCE_MEM;
300	res->start = base;
301	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
302	return res;
303}
304
305int amd_get_subcaches(int cpu)
306{
307	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
308	unsigned int mask;
309
310	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
311		return 0;
312
313	pci_read_config_dword(link, 0x1d4, &mask);
314
315	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
316}
317
318int amd_set_subcaches(int cpu, unsigned long mask)
319{
320	static unsigned int reset, ban;
321	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
322	unsigned int reg;
323	int cuid;
324
325	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
326		return -EINVAL;
327
328	/* if necessary, collect reset state of L3 partitioning and BAN mode */
329	if (reset == 0) {
330		pci_read_config_dword(nb->link, 0x1d4, &reset);
331		pci_read_config_dword(nb->misc, 0x1b8, &ban);
332		ban &= 0x180000;
333	}
334
335	/* deactivate BAN mode if any subcaches are to be disabled */
336	if (mask != 0xf) {
337		pci_read_config_dword(nb->misc, 0x1b8, &reg);
338		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
339	}
340
341	cuid = cpu_data(cpu).cpu_core_id;
342	mask <<= 4 * cuid;
343	mask |= (0xf ^ (1 << cuid)) << 26;
344
345	pci_write_config_dword(nb->link, 0x1d4, mask);
346
347	/* reset BAN mode if L3 partitioning returned to reset state */
348	pci_read_config_dword(nb->link, 0x1d4, &reg);
349	if (reg == reset) {
350		pci_read_config_dword(nb->misc, 0x1b8, &reg);
351		reg &= ~0x180000;
352		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
353	}
354
355	return 0;
356}
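/*
 * Hedged usage sketch (not part of the original file): a likely caller
 * of the two helpers above is the x86 cacheinfo sysfs code, which reads
 * the current subcache mask and writes a new one.
 *
 *	int cur = amd_get_subcaches(cpu);
 *
 *	if (amd_set_subcaches(cpu, 0x3))
 *		...error: partitioning unsupported or mask > 0xf...
 */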
357
358static void amd_cache_gart(void)
359{
360	u16 i;
361
362	if (!amd_nb_has_feature(AMD_NB_GART))
363		return;
364
365	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
366	if (!flush_words) {
367		amd_northbridges.flags &= ~AMD_NB_GART;
368		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
369		return;
370	}
 
 
 
371
372	for (i = 0; i != amd_northbridges.num; i++)
373		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
374}
375
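/*
 * A GART TLB flush is requested by setting bit 0 of the cached flush
 * word and writing it back to register 0x9c of each misc (function 3)
 * device; the hardware clears bit 0 again once the flush has completed.
 */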
376void amd_flush_garts(void)
377{
378	int flushed, i;
379	unsigned long flags;
380	static DEFINE_SPINLOCK(gart_lock);
381
382	if (!amd_nb_has_feature(AMD_NB_GART))
383		return;
384
385	/*
386	 * Avoid races between AGP and IOMMU. In theory it's not needed
387	 * but I'm not sure if the hardware won't lose flush requests
388	 * when another is pending. This whole thing is so expensive anyways
389	 * that it doesn't matter to serialize more. -AK
390	 */
391	spin_lock_irqsave(&gart_lock, flags);
392	flushed = 0;
393	for (i = 0; i < amd_northbridges.num; i++) {
394		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
395				       flush_words[i] | 1);
396		flushed++;
397	}
398	for (i = 0; i < amd_northbridges.num; i++) {
399		u32 w;
400		/* Make sure the hardware actually executed the flush */
401		for (;;) {
402			pci_read_config_dword(node_to_amd_nb(i)->misc,
403					      0x9c, &w);
404			if (!(w & 1))
405				break;
406			cpu_relax();
407		}
408	}
409	spin_unlock_irqrestore(&gart_lock, flags);
410	if (!flushed)
411		pr_notice("nothing to flush?\n");
412}
413EXPORT_SYMBOL_GPL(amd_flush_garts);
414
415static void __fix_erratum_688(void *info)
416{
417#define MSR_AMD64_IC_CFG 0xC0011021
418
419	msr_set_bit(MSR_AMD64_IC_CFG, 3);
420	msr_set_bit(MSR_AMD64_IC_CFG, 14);
421}
422
423/* Apply erratum 688 fix so machines without a BIOS fix work. */
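/*
 * The workaround is applied only on family 0x14 parts, and only when
 * bit 2 of register 0x164 on the link (function 4) device is clear,
 * which is taken to mean the BIOS has not already applied it.
 */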
424static __init void fix_erratum_688(void)
425{
426	struct pci_dev *F4;
427	u32 val;
428
429	if (boot_cpu_data.x86 != 0x14)
430		return;
431
432	if (!amd_northbridges.num)
433		return;
434
435	F4 = node_to_amd_nb(0)->link;
436	if (!F4)
437		return;
438
439	if (pci_read_config_dword(F4, 0x164, &val))
440		return;
441
442	if (val & BIT(2))
443		return;
444
445	on_each_cpu(__fix_erratum_688, NULL, 0);
446
447	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
448}
449
450static __init int init_amd_nbs(void)
451{
452	amd_cache_northbridges();
453	amd_cache_gart();
454
455	fix_erratum_688();
456
457	return 0;
458}
459
460/* This has to go after the PCI subsystem */
461fs_initcall(init_amd_nbs);
v3.5.6
  1/*
  2 * Shared support code for AMD K8 northbridges and derivatives.
  3 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
  4 */
  5#include <linux/types.h>
  6#include <linux/slab.h>
  7#include <linux/init.h>
  8#include <linux/errno.h>
  9#include <linux/module.h>
 10#include <linux/spinlock.h>
 11#include <asm/amd_nb.h>
 12
 13static u32 *flush_words;
 14
 15const struct pci_device_id amd_nb_misc_ids[] = {
 16	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 17	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 18	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 19	{}
 20};
 21EXPORT_SYMBOL(amd_nb_misc_ids);
 22
 23static struct pci_device_id amd_nb_link_ids[] = {
 24	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
 25	{}
 26};
 27
 28const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
 29	{ 0x00, 0x18, 0x20 },
 30	{ 0xff, 0x00, 0x20 },
 31	{ 0xfe, 0x00, 0x20 },
 32	{ }
 33};
 34
 35struct amd_northbridge_info amd_northbridges;
 36EXPORT_SYMBOL(amd_northbridges);
 37
 38static struct pci_dev *next_northbridge(struct pci_dev *dev,
 39					const struct pci_device_id *ids)
 40{
 41	do {
 42		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
 43		if (!dev)
 44			break;
 45	} while (!pci_match_id(ids, dev));
 46	return dev;
 47}
 48
 49int amd_cache_northbridges(void)
 50{
 51	u16 i = 0;
 52	struct amd_northbridge *nb;
 53	struct pci_dev *misc, *link;
 54
 55	if (amd_nb_num())
 56		return 0;
 57
 58	misc = NULL;
 59	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
 60		i++;
 61
 62	if (i == 0)
 63		return 0;
 64
 65	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
 66	if (!nb)
 67		return -ENOMEM;
 68
 69	amd_northbridges.nb = nb;
 70	amd_northbridges.num = i;
 71
 72	link = misc = NULL;
 73	for (i = 0; i != amd_nb_num(); i++) {
 74		node_to_amd_nb(i)->misc = misc =
 75			next_northbridge(misc, amd_nb_misc_ids);
 76		node_to_amd_nb(i)->link = link =
 77			next_northbridge(link, amd_nb_link_ids);
 78	}
 79
 80	/* some CPU families (e.g. family 0x11) do not support GART */
 81	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
 82	    boot_cpu_data.x86 == 0x15)
 83		amd_northbridges.flags |= AMD_NB_GART;
 84
 85	/*
 86	 * Some CPU families support L3 Cache Index Disable. There are some
 87	 * limitations because of E382 and E388 on family 0x10.
 88	 */
 89	if (boot_cpu_data.x86 == 0x10 &&
 90	    boot_cpu_data.x86_model >= 0x8 &&
 91	    (boot_cpu_data.x86_model > 0x9 ||
 92	     boot_cpu_data.x86_mask >= 0x1))
 93		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
 94
 95	if (boot_cpu_data.x86 == 0x15)
 96		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
 97
 98	/* L3 cache partitioning is supported on family 0x15 */
 99	if (boot_cpu_data.x86 == 0x15)
100		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
101
102	return 0;
103}
104EXPORT_SYMBOL_GPL(amd_cache_northbridges);
105
106/*
107 * Ignores subdevice/subvendor but as far as I can figure out
108 * they're useless anyways
109 */
110bool __init early_is_amd_nb(u32 device)
111{
112	const struct pci_device_id *id;
113	u32 vendor = device & 0xffff;
114
115	device >>= 16;
116	for (id = amd_nb_misc_ids; id->vendor; id++)
117		if (vendor == id->vendor && device == id->device)
118			return true;
119	return false;
120}
121
122struct resource *amd_get_mmconfig_range(struct resource *res)
123{
124	u32 address;
125	u64 base, msr;
126	unsigned segn_busn_bits;
127
128	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
129		return NULL;
130
131	/* assume all cpus from fam10h have mmconfig */
132	if (boot_cpu_data.x86 < 0x10)
133		return NULL;
134
135	address = MSR_FAM10H_MMIO_CONF_BASE;
136	rdmsrl(address, msr);
137
138	/* mmconfig is not enabled */
139	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
140		return NULL;
141
142	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
143
144	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
145			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
146
147	res->flags = IORESOURCE_MEM;
148	res->start = base;
149	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
150	return res;
151}
152
153int amd_get_subcaches(int cpu)
154{
155	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
156	unsigned int mask;
157	int cuid;
158
159	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
160		return 0;
161
162	pci_read_config_dword(link, 0x1d4, &mask);
163
164	cuid = cpu_data(cpu).compute_unit_id;
165	return (mask >> (4 * cuid)) & 0xf;
166}
167
168int amd_set_subcaches(int cpu, int mask)
169{
170	static unsigned int reset, ban;
171	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
172	unsigned int reg;
173	int cuid;
174
175	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
176		return -EINVAL;
177
178	/* if necessary, collect reset state of L3 partitioning and BAN mode */
179	if (reset == 0) {
180		pci_read_config_dword(nb->link, 0x1d4, &reset);
181		pci_read_config_dword(nb->misc, 0x1b8, &ban);
182		ban &= 0x180000;
183	}
184
185	/* deactivate BAN mode if any subcaches are to be disabled */
186	if (mask != 0xf) {
187		pci_read_config_dword(nb->misc, 0x1b8, &reg);
188		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
189	}
190
191	cuid = cpu_data(cpu).compute_unit_id;
192	mask <<= 4 * cuid;
193	mask |= (0xf ^ (1 << cuid)) << 26;
194
195	pci_write_config_dword(nb->link, 0x1d4, mask);
196
197	/* reset BAN mode if L3 partitioning returned to reset state */
198	pci_read_config_dword(nb->link, 0x1d4, &reg);
199	if (reg == reset) {
200		pci_read_config_dword(nb->misc, 0x1b8, &reg);
201		reg &= ~0x180000;
202		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
203	}
204
205	return 0;
206}
207
208static int amd_cache_gart(void)
209{
210	u16 i;
211
212	if (!amd_nb_has_feature(AMD_NB_GART))
213		return 0;
214
215	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
216	if (!flush_words) {
217		amd_northbridges.flags &= ~AMD_NB_GART;
218		return -ENOMEM;
219	}
220
221	for (i = 0; i != amd_nb_num(); i++)
222		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
223				      &flush_words[i]);
224
225	return 0;
226}
227
228void amd_flush_garts(void)
229{
230	int flushed, i;
231	unsigned long flags;
232	static DEFINE_SPINLOCK(gart_lock);
233
234	if (!amd_nb_has_feature(AMD_NB_GART))
235		return;
236
237	/* Avoid races between AGP and IOMMU. In theory it's not needed
238	   but I'm not sure if the hardware won't lose flush requests
239	   when another is pending. This whole thing is so expensive anyways
240	   that it doesn't matter to serialize more. -AK */
241	spin_lock_irqsave(&gart_lock, flags);
242	flushed = 0;
243	for (i = 0; i < amd_nb_num(); i++) {
244		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
245				       flush_words[i] | 1);
246		flushed++;
247	}
248	for (i = 0; i < amd_nb_num(); i++) {
249		u32 w;
250		/* Make sure the hardware actually executed the flush */
251		for (;;) {
252			pci_read_config_dword(node_to_amd_nb(i)->misc,
253					      0x9c, &w);
254			if (!(w & 1))
255				break;
256			cpu_relax();
257		}
258	}
259	spin_unlock_irqrestore(&gart_lock, flags);
260	if (!flushed)
261		printk("nothing to flush?\n");
262}
263EXPORT_SYMBOL_GPL(amd_flush_garts);
264
265static __init int init_amd_nbs(void)
266{
267	int err = 0;
268
269	err = amd_cache_northbridges();
270
271	if (err < 0)
272		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
273
274	if (amd_cache_gart() < 0)
275		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
276		       "GART support disabled.\n");
 
 
277
278	return err;
279}
280
281/* This has to go after the PCI subsystem */
282fs_initcall(init_amd_nbs);