v3.15
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

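/*
 * Cached contents of the GART flush-control word (config offset 0x9c on
 * each node's misc/F3 device), filled in by amd_cache_gart() below.
 */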
static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{}
};

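/*
 * Bus/device ranges that may hold AMD northbridge devices: each entry
 * gives a bus number plus the device range (base, limit) to scan.
 */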
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

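/*
 * Walk all PCI devices and return the next one after @dev that matches
 * one of the IDs in @ids (NULL when the list is exhausted).
 */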
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

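/*
 * Enumerate the northbridges once: count the misc (function 3) devices,
 * allocate one amd_northbridge per node, pair each node with its misc
 * and link (function 4) devices, and set the feature flags used below.
 */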
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* GART present on Fam 0xf/0x10, and on Fam15h only up to model 0fh */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.  @device packs the PCI vendor ID in its low
 * 16 bits and the device ID in its high 16 bits.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

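/*
 * Derive the MMCONFIG aperture from MSR_FAM10H_MMIO_CONF_BASE: the base
 * address plus one 1 MB config-space window per bus covered by the
 * programmed bus range.  Returns NULL when MMCONFIG is unavailable or
 * not enabled.
 */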
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

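/*
 * Read the per-compute-unit L3 subcache enable mask from config offset
 * 0x1d4 on the node's link (F4) device; each compute unit owns a 4-bit
 * field, so the return value is a bitmask of up to four subcaches.
 */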
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

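/*
 * Program the L3 subcache enable mask for @cpu's compute unit.  BAN mode
 * is dropped while any subcache is disabled and restored once the
 * register returns to its reset state.
 */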
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

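/*
 * Cache the GART flush word (config offset 0x9c on each misc device) so
 * that amd_flush_garts() does not have to read it on every flush.
 */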
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

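/*
 * Flush the GART TLB on every northbridge: setting bit 0 of the flush
 * word triggers the flush, and the hardware clears the bit again once
 * the flush is complete, so poll until all nodes report done.
 */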
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
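
For context, here is a minimal, illustrative sketch of how a driver might consume the interface exported above. The helpers amd_cache_northbridges(), amd_nb_num(), amd_nb_has_feature(), node_to_amd_nb() and amd_flush_garts() are the ones defined in this file; the wrapper name example_read_nb_misc() and the 0x60 config offset are hypothetical and exist only for illustration.

#include <linux/pci.h>
#include <linux/printk.h>
#include <asm/amd_nb.h>

/*
 * Hypothetical example (not part of amd_nb.c): dump a config dword from
 * every node's misc (F3) device and flush the GARTs if present.
 */
static int example_read_nb_misc(void)
{
	unsigned int i;
	u32 val;

	if (amd_cache_northbridges() < 0 || !amd_nb_num())
		return -ENODEV;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *misc = node_to_amd_nb(i)->misc;

		/* 0x60 is an arbitrary illustrative offset */
		pci_read_config_dword(misc, 0x60, &val);
		pr_info("node %u: F3 config 0x60 = 0x%08x\n", i, val);
	}

	if (amd_nb_has_feature(AMD_NB_GART))
		amd_flush_garts();

	return 0;
}
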
v3.1
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);