/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
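
/*
 * Illustrative usage sketch, disabled from the build: a typical consumer
 * populates the cache once and then walks the cached per-node "misc"
 * devices.  The 0x9c offset is only an example register and the helper
 * name is hypothetical.
 */
#if 0
static int example_read_per_node_reg(u32 *buf)
{
	u16 i;

	if (amd_cache_northbridges() < 0 || !amd_nb_num())
		return -ENODEV;

	for (i = 0; i < amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &buf[i]);

	return 0;
}
#endif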

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}
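
/*
 * Sketch of the expected argument encoding (illustrative, assuming the
 * early PCI accessors from <asm/pci-direct.h>): the u32 passed in is the
 * raw dword at config space offset 0, i.e. the device ID in bits 31:16
 * and the vendor ID in bits 15:0.  The helper name is hypothetical.
 */
#if 0
static bool __init example_probe_nb(int bus, int slot, int func)
{
	/* read_pci_config() returns (device << 16) | vendor */
	return early_is_amd_nb(read_pci_config(bus, slot, func, 0));
}
#endif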

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}
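
/*
 * Worked example for the size computation above: each PCI bus occupies
 * 1 MiB of MMCONFIG space (32 devices * 8 functions * 4 KiB), so a bus
 * range value of 8 (256 buses) gives 1ULL << (8 + 20) = 256 MiB and
 * res->end = base + 0x0fffffff.
 */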

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}
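
/*
 * Example of the decode above (the register value is illustrative): with
 * compute unit id 2 and register 0x1d4 reading 0x0000f5af, the result is
 * (0xf5af >> 8) & 0xf == 0x5, i.e. subcaches 0 and 2 are enabled for
 * that compute unit.
 */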

int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
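
/*
 * Minimal usage sketch, disabled from the build: SMN_EXAMPLE_REG is a
 * placeholder offset, real addresses come from the platform documentation
 * of the calling driver, and the helper name is hypothetical.
 */
#if 0
#define SMN_EXAMPLE_REG	0x50000		/* hypothetical SMN address */

static int example_smn_set_bit0(u16 node)
{
	u32 val;
	int err;

	err = amd_smn_read(node, SMN_EXAMPLE_REG, &val);
	if (err)
		return err;

	return amd_smn_write(node, SMN_EXAMPLE_REG, val | 0x1);
}
#endif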

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);
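
/*
 * Worked example of the FICAA encoding above (values are illustrative):
 * func = 0, reg = 0x114, instance_id = 5 gives
 * ficaa = 1 | (0x114 & 0x3FC) | (0 << 11) | (5 << 16) = 0x50115.
 */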

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *root, *misc, *link;

	if (amd_northbridges.num)
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (!i)
		return -ENODEV;

	nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = root = NULL;
	for (i = 0; i != amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, amd_root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
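
/*
 * Worked example for the mask construction above (values are
 * illustrative): with cuid == 1 and an input mask of 0x3, the per-unit
 * field becomes 0x3 << 4 == 0x30 and the high bits become
 * (0xf ^ (1 << 1)) << 26 == 0x34000000, so 0x34000030 is written to
 * register 0x1d4.
 */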

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
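
/*
 * Usage note (sketch): GART users are expected to call amd_flush_garts()
 * after updating GART PTEs so that every northbridge drops its stale
 * translations before the new mappings are relied upon.
 */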

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);