arch/x86/kernel/quirks.c — Linux kernel x86 platform-bug workarounds.
Snapshot 1 of 2 below: kernel v5.4.
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * This file contains work-arounds for x86 and x86_64 platform bugs.
  4 */
  5#include <linux/dmi.h>
  6#include <linux/pci.h>
  7#include <linux/irq.h>
  8
  9#include <asm/hpet.h>
 10#include <asm/setup.h>
 
 11
 12#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
 13
 14static void quirk_intel_irqbalance(struct pci_dev *dev)
 15{
 16	u8 config;
 17	u16 word;
 18
 19	/* BIOS may enable hardware IRQ balancing for
 20	 * E7520/E7320/E7525(revision ID 0x9 and below)
 21	 * based platforms.
 22	 * Disable SW irqbalance/affinity on those platforms.
 23	 */
 24	if (dev->revision > 0x9)
 25		return;
 26
 27	/* enable access to config space*/
 28	pci_read_config_byte(dev, 0xf4, &config);
 29	pci_write_config_byte(dev, 0xf4, config|0x2);
 30
 31	/*
 32	 * read xTPR register.  We may not have a pci_dev for device 8
 33	 * because it might be hidden until the above write.
 34	 */
 35	pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);
 36
 37	if (!(word & (1 << 13))) {
 38		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
 39			"disabling irq balancing and affinity\n");
 40		noirqdebug_setup("");
 41#ifdef CONFIG_PROC_FS
 42		no_irq_affinity = 1;
 43#endif
 44	}
 45
 46	/* put back the original value for config space*/
 47	if (!(config & 0x2))
 48		pci_write_config_byte(dev, 0xf4, config);
 49}
 50DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
 51			quirk_intel_irqbalance);
 52DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
 53			quirk_intel_irqbalance);
 54DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
 55			quirk_intel_irqbalance);
 56#endif
 57
 58#if defined(CONFIG_HPET_TIMER)
 59unsigned long force_hpet_address;
 60
 61static enum {
 62	NONE_FORCE_HPET_RESUME,
 63	OLD_ICH_FORCE_HPET_RESUME,
 64	ICH_FORCE_HPET_RESUME,
 65	VT8237_FORCE_HPET_RESUME,
 66	NVIDIA_FORCE_HPET_RESUME,
 67	ATI_FORCE_HPET_RESUME,
 68} force_hpet_resume_type;
 69
 70static void __iomem *rcba_base;
 71
 72static void ich_force_hpet_resume(void)
 73{
 74	u32 val;
 75
 76	if (!force_hpet_address)
 77		return;
 78
 79	BUG_ON(rcba_base == NULL);
 80
 81	/* read the Function Disable register, dword mode only */
 82	val = readl(rcba_base + 0x3404);
 83	if (!(val & 0x80)) {
 84		/* HPET disabled in HPTC. Trying to enable */
 85		writel(val | 0x80, rcba_base + 0x3404);
 86	}
 87
 88	val = readl(rcba_base + 0x3404);
 89	if (!(val & 0x80))
 90		BUG();
 91	else
 92		printk(KERN_DEBUG "Force enabled HPET at resume\n");
 93}
 94
 95static void ich_force_enable_hpet(struct pci_dev *dev)
 96{
 97	u32 val;
 98	u32 uninitialized_var(rcba);
 99	int err = 0;
100
101	if (hpet_address || force_hpet_address)
102		return;
103
104	pci_read_config_dword(dev, 0xF0, &rcba);
105	rcba &= 0xFFFFC000;
106	if (rcba == 0) {
107		dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
108			"cannot force enable HPET\n");
109		return;
110	}
111
112	/* use bits 31:14, 16 kB aligned */
113	rcba_base = ioremap_nocache(rcba, 0x4000);
114	if (rcba_base == NULL) {
115		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
116			"cannot force enable HPET\n");
117		return;
118	}
119
120	/* read the Function Disable register, dword mode only */
121	val = readl(rcba_base + 0x3404);
122
123	if (val & 0x80) {
124		/* HPET is enabled in HPTC. Just not reported by BIOS */
125		val = val & 0x3;
126		force_hpet_address = 0xFED00000 | (val << 12);
127		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
128			"0x%lx\n", force_hpet_address);
129		iounmap(rcba_base);
130		return;
131	}
132
133	/* HPET disabled in HPTC. Trying to enable */
134	writel(val | 0x80, rcba_base + 0x3404);
135
136	val = readl(rcba_base + 0x3404);
137	if (!(val & 0x80)) {
138		err = 1;
139	} else {
140		val = val & 0x3;
141		force_hpet_address = 0xFED00000 | (val << 12);
142	}
143
144	if (err) {
145		force_hpet_address = 0;
146		iounmap(rcba_base);
147		dev_printk(KERN_DEBUG, &dev->dev,
148			"Failed to force enable HPET\n");
149	} else {
150		force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
151		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
152			"0x%lx\n", force_hpet_address);
153	}
154}
155
156DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
157			 ich_force_enable_hpet);
158DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
159			 ich_force_enable_hpet);
160DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
161			 ich_force_enable_hpet);
162DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
163			 ich_force_enable_hpet);
164DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
165			 ich_force_enable_hpet);
166DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
167			 ich_force_enable_hpet);
168DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
169			 ich_force_enable_hpet);
170DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
171			 ich_force_enable_hpet);
172DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
173			 ich_force_enable_hpet);
174DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
175			 ich_force_enable_hpet);
176
177static struct pci_dev *cached_dev;
178
179static void hpet_print_force_info(void)
180{
181	printk(KERN_INFO "HPET not enabled in BIOS. "
182	       "You might try hpet=force boot option\n");
183}
184
185static void old_ich_force_hpet_resume(void)
186{
187	u32 val;
188	u32 uninitialized_var(gen_cntl);
189
190	if (!force_hpet_address || !cached_dev)
191		return;
192
193	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
194	gen_cntl &= (~(0x7 << 15));
195	gen_cntl |= (0x4 << 15);
196
197	pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
198	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
199	val = gen_cntl >> 15;
200	val &= 0x7;
201	if (val == 0x4)
202		printk(KERN_DEBUG "Force enabled HPET at resume\n");
203	else
204		BUG();
205}
206
207static void old_ich_force_enable_hpet(struct pci_dev *dev)
208{
209	u32 val;
210	u32 uninitialized_var(gen_cntl);
211
212	if (hpet_address || force_hpet_address)
213		return;
214
215	pci_read_config_dword(dev, 0xD0, &gen_cntl);
216	/*
217	 * Bit 17 is HPET enable bit.
218	 * Bit 16:15 control the HPET base address.
219	 */
220	val = gen_cntl >> 15;
221	val &= 0x7;
222	if (val & 0x4) {
223		val &= 0x3;
224		force_hpet_address = 0xFED00000 | (val << 12);
225		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
226			force_hpet_address);
227		return;
228	}
229
230	/*
231	 * HPET is disabled. Trying enabling at FED00000 and check
232	 * whether it sticks
233	 */
234	gen_cntl &= (~(0x7 << 15));
235	gen_cntl |= (0x4 << 15);
236	pci_write_config_dword(dev, 0xD0, gen_cntl);
237
238	pci_read_config_dword(dev, 0xD0, &gen_cntl);
239
240	val = gen_cntl >> 15;
241	val &= 0x7;
242	if (val & 0x4) {
243		/* HPET is enabled in HPTC. Just not reported by BIOS */
244		val &= 0x3;
245		force_hpet_address = 0xFED00000 | (val << 12);
246		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
247			"0x%lx\n", force_hpet_address);
248		cached_dev = dev;
249		force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
250		return;
251	}
252
253	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
254}
255
256/*
257 * Undocumented chipset features. Make sure that the user enforced
258 * this.
259 */
260static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
261{
262	if (hpet_force_user)
263		old_ich_force_enable_hpet(dev);
264}
265
266DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
267			 old_ich_force_enable_hpet_user);
268DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
269			 old_ich_force_enable_hpet_user);
270DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
271			 old_ich_force_enable_hpet_user);
272DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
273			 old_ich_force_enable_hpet_user);
274DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
275			 old_ich_force_enable_hpet_user);
276DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
277			 old_ich_force_enable_hpet);
278DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
279			 old_ich_force_enable_hpet);
280
281
282static void vt8237_force_hpet_resume(void)
283{
284	u32 val;
285
286	if (!force_hpet_address || !cached_dev)
287		return;
288
289	val = 0xfed00000 | 0x80;
290	pci_write_config_dword(cached_dev, 0x68, val);
291
292	pci_read_config_dword(cached_dev, 0x68, &val);
293	if (val & 0x80)
294		printk(KERN_DEBUG "Force enabled HPET at resume\n");
295	else
296		BUG();
297}
298
299static void vt8237_force_enable_hpet(struct pci_dev *dev)
300{
301	u32 uninitialized_var(val);
302
303	if (hpet_address || force_hpet_address)
304		return;
305
306	if (!hpet_force_user) {
307		hpet_print_force_info();
308		return;
309	}
310
311	pci_read_config_dword(dev, 0x68, &val);
312	/*
313	 * Bit 7 is HPET enable bit.
314	 * Bit 31:10 is HPET base address (contrary to what datasheet claims)
315	 */
316	if (val & 0x80) {
317		force_hpet_address = (val & ~0x3ff);
318		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
319			force_hpet_address);
320		return;
321	}
322
323	/*
324	 * HPET is disabled. Trying enabling at FED00000 and check
325	 * whether it sticks
326	 */
327	val = 0xfed00000 | 0x80;
328	pci_write_config_dword(dev, 0x68, val);
329
330	pci_read_config_dword(dev, 0x68, &val);
331	if (val & 0x80) {
332		force_hpet_address = (val & ~0x3ff);
333		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
334			"0x%lx\n", force_hpet_address);
335		cached_dev = dev;
336		force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
337		return;
338	}
339
340	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
341}
342
343DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
344			 vt8237_force_enable_hpet);
345DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
346			 vt8237_force_enable_hpet);
347DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
348			 vt8237_force_enable_hpet);
349
350static void ati_force_hpet_resume(void)
351{
352	pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
353	printk(KERN_DEBUG "Force enabled HPET at resume\n");
354}
355
356static u32 ati_ixp4x0_rev(struct pci_dev *dev)
357{
358	int err = 0;
359	u32 d = 0;
360	u8  b = 0;
361
362	err = pci_read_config_byte(dev, 0xac, &b);
363	b &= ~(1<<5);
364	err |= pci_write_config_byte(dev, 0xac, b);
365	err |= pci_read_config_dword(dev, 0x70, &d);
366	d |= 1<<8;
367	err |= pci_write_config_dword(dev, 0x70, d);
368	err |= pci_read_config_dword(dev, 0x8, &d);
369	d &= 0xff;
370	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
371
372	WARN_ON_ONCE(err);
373
374	return d;
375}
376
377static void ati_force_enable_hpet(struct pci_dev *dev)
378{
379	u32 d, val;
380	u8  b;
381
382	if (hpet_address || force_hpet_address)
383		return;
384
385	if (!hpet_force_user) {
386		hpet_print_force_info();
387		return;
388	}
389
390	d = ati_ixp4x0_rev(dev);
391	if (d  < 0x82)
392		return;
393
394	/* base address */
395	pci_write_config_dword(dev, 0x14, 0xfed00000);
396	pci_read_config_dword(dev, 0x14, &val);
397
398	/* enable interrupt */
399	outb(0x72, 0xcd6); b = inb(0xcd7);
400	b |= 0x1;
401	outb(0x72, 0xcd6); outb(b, 0xcd7);
402	outb(0x72, 0xcd6); b = inb(0xcd7);
403	if (!(b & 0x1))
404		return;
405	pci_read_config_dword(dev, 0x64, &d);
406	d |= (1<<10);
407	pci_write_config_dword(dev, 0x64, d);
408	pci_read_config_dword(dev, 0x64, &d);
409	if (!(d & (1<<10)))
410		return;
411
412	force_hpet_address = val;
413	force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
414	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
415		   force_hpet_address);
416	cached_dev = dev;
417}
418DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
419			 ati_force_enable_hpet);
420
421/*
422 * Undocumented chipset feature taken from LinuxBIOS.
423 */
424static void nvidia_force_hpet_resume(void)
425{
426	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
427	printk(KERN_DEBUG "Force enabled HPET at resume\n");
428}
429
430static void nvidia_force_enable_hpet(struct pci_dev *dev)
431{
432	u32 uninitialized_var(val);
433
434	if (hpet_address || force_hpet_address)
435		return;
436
437	if (!hpet_force_user) {
438		hpet_print_force_info();
439		return;
440	}
441
442	pci_write_config_dword(dev, 0x44, 0xfed00001);
443	pci_read_config_dword(dev, 0x44, &val);
444	force_hpet_address = val & 0xfffffffe;
445	force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
446	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
447		force_hpet_address);
448	cached_dev = dev;
449}
450
451/* ISA Bridges */
452DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
453			nvidia_force_enable_hpet);
454DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
455			nvidia_force_enable_hpet);
456
457/* LPC bridges */
458DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
459			nvidia_force_enable_hpet);
460DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
461			nvidia_force_enable_hpet);
462DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
463			nvidia_force_enable_hpet);
464DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
465			nvidia_force_enable_hpet);
466DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
467			nvidia_force_enable_hpet);
468DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
469			nvidia_force_enable_hpet);
470DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
471			nvidia_force_enable_hpet);
472DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
473			nvidia_force_enable_hpet);
474DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
475			nvidia_force_enable_hpet);
476
477void force_hpet_resume(void)
478{
479	switch (force_hpet_resume_type) {
480	case ICH_FORCE_HPET_RESUME:
481		ich_force_hpet_resume();
482		return;
483	case OLD_ICH_FORCE_HPET_RESUME:
484		old_ich_force_hpet_resume();
485		return;
486	case VT8237_FORCE_HPET_RESUME:
487		vt8237_force_hpet_resume();
488		return;
489	case NVIDIA_FORCE_HPET_RESUME:
490		nvidia_force_hpet_resume();
491		return;
492	case ATI_FORCE_HPET_RESUME:
493		ati_force_hpet_resume();
494		return;
495	default:
496		break;
497	}
498}
499
500/*
501 * According to the datasheet e6xx systems have the HPET hardwired to
502 * 0xfed00000
503 */
504static void e6xx_force_enable_hpet(struct pci_dev *dev)
505{
506	if (hpet_address || force_hpet_address)
507		return;
508
509	force_hpet_address = 0xFED00000;
510	force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
511	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
512		"0x%lx\n", force_hpet_address);
513}
514DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
515			 e6xx_force_enable_hpet);
516
517/*
518 * HPET MSI on some boards (ATI SB700/SB800) has side effect on
519 * floppy DMA. Disable HPET MSI on such platforms.
520 * See erratum #27 (Misinterpreted MSI Requests May Result in
521 * Corrupted LPC DMA Data) in AMD Publication #46837,
522 * "SB700 Family Product Errata", Rev. 1.0, March 2010.
523 */
524static void force_disable_hpet_msi(struct pci_dev *unused)
525{
526	hpet_msi_disable = true;
527}
528
529DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
530			 force_disable_hpet_msi);
531
532#endif
533
534#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
535/* Set correct numa_node information for AMD NB functions */
536static void quirk_amd_nb_node(struct pci_dev *dev)
537{
538	struct pci_dev *nb_ht;
539	unsigned int devfn;
540	u32 node;
541	u32 val;
542
543	devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
544	nb_ht = pci_get_slot(dev->bus, devfn);
545	if (!nb_ht)
546		return;
547
548	pci_read_config_dword(nb_ht, 0x60, &val);
549	node = pcibus_to_node(dev->bus) | (val & 7);
550	/*
551	 * Some hardware may return an invalid node ID,
552	 * so check it first:
553	 */
554	if (node_online(node))
555		set_dev_node(&dev->dev, node);
556	pci_dev_put(nb_ht);
557}
558
559DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
560			quirk_amd_nb_node);
561DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
562			quirk_amd_nb_node);
563DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
564			quirk_amd_nb_node);
565DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
566			quirk_amd_nb_node);
567DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
568			quirk_amd_nb_node);
569DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
570			quirk_amd_nb_node);
571DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
572			quirk_amd_nb_node);
573DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
574			quirk_amd_nb_node);
575DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
576			quirk_amd_nb_node);
577DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
578			quirk_amd_nb_node);
579DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
580			quirk_amd_nb_node);
581DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
582			quirk_amd_nb_node);
583DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
584			quirk_amd_nb_node);
585DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
586			quirk_amd_nb_node);
587DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
588			quirk_amd_nb_node);
589
590#endif
591
592#ifdef CONFIG_PCI
593/*
594 * Processor does not ensure DRAM scrub read/write sequence
595 * is atomic wrt accesses to CC6 save state area. Therefore
596 * if a concurrent scrub read/write access is to same address
597 * the entry may appear as if it is not written. This quirk
598 * applies to Fam16h models 00h-0Fh
599 *
600 * See "Revision Guide" for AMD F16h models 00h-0fh,
601 * document 51810 rev. 3.04, Nov 2013
602 */
603static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
604{
605	u32 val;
606
607	/*
608	 * Suggested workaround:
609	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
610	 */
611	pci_read_config_dword(dev, 0x58, &val);
612	if (val & 0x1F) {
613		val &= ~(0x1F);
614		pci_write_config_dword(dev, 0x58, val);
615	}
616
617	pci_read_config_dword(dev, 0x5C, &val);
618	if (val & BIT(0)) {
619		val &= ~BIT(0);
620		pci_write_config_dword(dev, 0x5c, val);
621	}
622}
623
624DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
625			amd_disable_seq_and_redirect_scrub);
626
627#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
628#include <linux/jump_label.h>
629#include <asm/string_64.h>
630
631/* Ivy Bridge, Haswell, Broadwell */
632static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
633{
634	u32 capid0;
635
636	pci_read_config_dword(pdev, 0x84, &capid0);
637
638	if (capid0 & 0x10)
639		static_branch_inc(&mcsafe_key);
640}
641
642/* Skylake */
643static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
644{
645	u32 capid0, capid5;
646
647	pci_read_config_dword(pdev, 0x84, &capid0);
648	pci_read_config_dword(pdev, 0x98, &capid5);
649
650	/*
651	 * CAPID0{7:6} indicate whether this is an advanced RAS SKU
652	 * CAPID5{8:5} indicate that various NVDIMM usage modes are
653	 * enabled, so memory machine check recovery is also enabled.
654	 */
655	if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
656		static_branch_inc(&mcsafe_key);
657
658}
659DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
660DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
661DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
662DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
663#endif
664#endif
665
666bool x86_apple_machine;
667EXPORT_SYMBOL(x86_apple_machine);
668
669void __init early_platform_quirks(void)
670{
671	x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
672			    dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
673}
Snapshot 2 of 2 below: the same file as of kernel v6.2 (plain `rcba`/`gen_cntl`/`val` declarations replace the removed uninitialized_var() macro, ioremap() replaces ioremap_nocache(), and <asm/mce.h> is now included).
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * This file contains work-arounds for x86 and x86_64 platform bugs.
  4 */
  5#include <linux/dmi.h>
  6#include <linux/pci.h>
  7#include <linux/irq.h>
  8
  9#include <asm/hpet.h>
 10#include <asm/setup.h>
 11#include <asm/mce.h>
 12
 13#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
 14
 15static void quirk_intel_irqbalance(struct pci_dev *dev)
 16{
 17	u8 config;
 18	u16 word;
 19
 20	/* BIOS may enable hardware IRQ balancing for
 21	 * E7520/E7320/E7525(revision ID 0x9 and below)
 22	 * based platforms.
 23	 * Disable SW irqbalance/affinity on those platforms.
 24	 */
 25	if (dev->revision > 0x9)
 26		return;
 27
 28	/* enable access to config space*/
 29	pci_read_config_byte(dev, 0xf4, &config);
 30	pci_write_config_byte(dev, 0xf4, config|0x2);
 31
 32	/*
 33	 * read xTPR register.  We may not have a pci_dev for device 8
 34	 * because it might be hidden until the above write.
 35	 */
 36	pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);
 37
 38	if (!(word & (1 << 13))) {
 39		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
 40			"disabling irq balancing and affinity\n");
 41		noirqdebug_setup("");
 42#ifdef CONFIG_PROC_FS
 43		no_irq_affinity = 1;
 44#endif
 45	}
 46
 47	/* put back the original value for config space*/
 48	if (!(config & 0x2))
 49		pci_write_config_byte(dev, 0xf4, config);
 50}
 51DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
 52			quirk_intel_irqbalance);
 53DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
 54			quirk_intel_irqbalance);
 55DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
 56			quirk_intel_irqbalance);
 57#endif
 58
 59#if defined(CONFIG_HPET_TIMER)
 60unsigned long force_hpet_address;
 61
 62static enum {
 63	NONE_FORCE_HPET_RESUME,
 64	OLD_ICH_FORCE_HPET_RESUME,
 65	ICH_FORCE_HPET_RESUME,
 66	VT8237_FORCE_HPET_RESUME,
 67	NVIDIA_FORCE_HPET_RESUME,
 68	ATI_FORCE_HPET_RESUME,
 69} force_hpet_resume_type;
 70
 71static void __iomem *rcba_base;
 72
 73static void ich_force_hpet_resume(void)
 74{
 75	u32 val;
 76
 77	if (!force_hpet_address)
 78		return;
 79
 80	BUG_ON(rcba_base == NULL);
 81
 82	/* read the Function Disable register, dword mode only */
 83	val = readl(rcba_base + 0x3404);
 84	if (!(val & 0x80)) {
 85		/* HPET disabled in HPTC. Trying to enable */
 86		writel(val | 0x80, rcba_base + 0x3404);
 87	}
 88
 89	val = readl(rcba_base + 0x3404);
 90	if (!(val & 0x80))
 91		BUG();
 92	else
 93		printk(KERN_DEBUG "Force enabled HPET at resume\n");
 94}
 95
 96static void ich_force_enable_hpet(struct pci_dev *dev)
 97{
 98	u32 val;
 99	u32 rcba;
100	int err = 0;
101
102	if (hpet_address || force_hpet_address)
103		return;
104
105	pci_read_config_dword(dev, 0xF0, &rcba);
106	rcba &= 0xFFFFC000;
107	if (rcba == 0) {
108		dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
109			"cannot force enable HPET\n");
110		return;
111	}
112
113	/* use bits 31:14, 16 kB aligned */
114	rcba_base = ioremap(rcba, 0x4000);
115	if (rcba_base == NULL) {
116		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
117			"cannot force enable HPET\n");
118		return;
119	}
120
121	/* read the Function Disable register, dword mode only */
122	val = readl(rcba_base + 0x3404);
123
124	if (val & 0x80) {
125		/* HPET is enabled in HPTC. Just not reported by BIOS */
126		val = val & 0x3;
127		force_hpet_address = 0xFED00000 | (val << 12);
128		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
129			"0x%lx\n", force_hpet_address);
130		iounmap(rcba_base);
131		return;
132	}
133
134	/* HPET disabled in HPTC. Trying to enable */
135	writel(val | 0x80, rcba_base + 0x3404);
136
137	val = readl(rcba_base + 0x3404);
138	if (!(val & 0x80)) {
139		err = 1;
140	} else {
141		val = val & 0x3;
142		force_hpet_address = 0xFED00000 | (val << 12);
143	}
144
145	if (err) {
146		force_hpet_address = 0;
147		iounmap(rcba_base);
148		dev_printk(KERN_DEBUG, &dev->dev,
149			"Failed to force enable HPET\n");
150	} else {
151		force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
152		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
153			"0x%lx\n", force_hpet_address);
154	}
155}
156
157DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
158			 ich_force_enable_hpet);
159DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
160			 ich_force_enable_hpet);
161DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
162			 ich_force_enable_hpet);
163DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
164			 ich_force_enable_hpet);
165DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
166			 ich_force_enable_hpet);
167DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
168			 ich_force_enable_hpet);
169DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
170			 ich_force_enable_hpet);
171DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
172			 ich_force_enable_hpet);
173DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
174			 ich_force_enable_hpet);
175DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
176			 ich_force_enable_hpet);
177
178static struct pci_dev *cached_dev;
179
180static void hpet_print_force_info(void)
181{
182	printk(KERN_INFO "HPET not enabled in BIOS. "
183	       "You might try hpet=force boot option\n");
184}
185
186static void old_ich_force_hpet_resume(void)
187{
188	u32 val;
189	u32 gen_cntl;
190
191	if (!force_hpet_address || !cached_dev)
192		return;
193
194	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
195	gen_cntl &= (~(0x7 << 15));
196	gen_cntl |= (0x4 << 15);
197
198	pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
199	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
200	val = gen_cntl >> 15;
201	val &= 0x7;
202	if (val == 0x4)
203		printk(KERN_DEBUG "Force enabled HPET at resume\n");
204	else
205		BUG();
206}
207
208static void old_ich_force_enable_hpet(struct pci_dev *dev)
209{
210	u32 val;
211	u32 gen_cntl;
212
213	if (hpet_address || force_hpet_address)
214		return;
215
216	pci_read_config_dword(dev, 0xD0, &gen_cntl);
217	/*
218	 * Bit 17 is HPET enable bit.
219	 * Bit 16:15 control the HPET base address.
220	 */
221	val = gen_cntl >> 15;
222	val &= 0x7;
223	if (val & 0x4) {
224		val &= 0x3;
225		force_hpet_address = 0xFED00000 | (val << 12);
226		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
227			force_hpet_address);
228		return;
229	}
230
231	/*
232	 * HPET is disabled. Trying enabling at FED00000 and check
233	 * whether it sticks
234	 */
235	gen_cntl &= (~(0x7 << 15));
236	gen_cntl |= (0x4 << 15);
237	pci_write_config_dword(dev, 0xD0, gen_cntl);
238
239	pci_read_config_dword(dev, 0xD0, &gen_cntl);
240
241	val = gen_cntl >> 15;
242	val &= 0x7;
243	if (val & 0x4) {
244		/* HPET is enabled in HPTC. Just not reported by BIOS */
245		val &= 0x3;
246		force_hpet_address = 0xFED00000 | (val << 12);
247		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
248			"0x%lx\n", force_hpet_address);
249		cached_dev = dev;
250		force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
251		return;
252	}
253
254	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
255}
256
257/*
258 * Undocumented chipset features. Make sure that the user enforced
259 * this.
260 */
261static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
262{
263	if (hpet_force_user)
264		old_ich_force_enable_hpet(dev);
265}
266
267DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
268			 old_ich_force_enable_hpet_user);
269DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
270			 old_ich_force_enable_hpet_user);
271DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
272			 old_ich_force_enable_hpet_user);
273DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
274			 old_ich_force_enable_hpet_user);
275DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
276			 old_ich_force_enable_hpet_user);
277DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
278			 old_ich_force_enable_hpet);
279DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
280			 old_ich_force_enable_hpet);
281
282
283static void vt8237_force_hpet_resume(void)
284{
285	u32 val;
286
287	if (!force_hpet_address || !cached_dev)
288		return;
289
290	val = 0xfed00000 | 0x80;
291	pci_write_config_dword(cached_dev, 0x68, val);
292
293	pci_read_config_dword(cached_dev, 0x68, &val);
294	if (val & 0x80)
295		printk(KERN_DEBUG "Force enabled HPET at resume\n");
296	else
297		BUG();
298}
299
300static void vt8237_force_enable_hpet(struct pci_dev *dev)
301{
302	u32 val;
303
304	if (hpet_address || force_hpet_address)
305		return;
306
307	if (!hpet_force_user) {
308		hpet_print_force_info();
309		return;
310	}
311
312	pci_read_config_dword(dev, 0x68, &val);
313	/*
314	 * Bit 7 is HPET enable bit.
315	 * Bit 31:10 is HPET base address (contrary to what datasheet claims)
316	 */
317	if (val & 0x80) {
318		force_hpet_address = (val & ~0x3ff);
319		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
320			force_hpet_address);
321		return;
322	}
323
324	/*
325	 * HPET is disabled. Trying enabling at FED00000 and check
326	 * whether it sticks
327	 */
328	val = 0xfed00000 | 0x80;
329	pci_write_config_dword(dev, 0x68, val);
330
331	pci_read_config_dword(dev, 0x68, &val);
332	if (val & 0x80) {
333		force_hpet_address = (val & ~0x3ff);
334		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
335			"0x%lx\n", force_hpet_address);
336		cached_dev = dev;
337		force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
338		return;
339	}
340
341	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
342}
343
344DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
345			 vt8237_force_enable_hpet);
346DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
347			 vt8237_force_enable_hpet);
348DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
349			 vt8237_force_enable_hpet);
350
351static void ati_force_hpet_resume(void)
352{
353	pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
354	printk(KERN_DEBUG "Force enabled HPET at resume\n");
355}
356
357static u32 ati_ixp4x0_rev(struct pci_dev *dev)
358{
359	int err = 0;
360	u32 d = 0;
361	u8  b = 0;
362
363	err = pci_read_config_byte(dev, 0xac, &b);
364	b &= ~(1<<5);
365	err |= pci_write_config_byte(dev, 0xac, b);
366	err |= pci_read_config_dword(dev, 0x70, &d);
367	d |= 1<<8;
368	err |= pci_write_config_dword(dev, 0x70, d);
369	err |= pci_read_config_dword(dev, 0x8, &d);
370	d &= 0xff;
371	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
372
373	WARN_ON_ONCE(err);
374
375	return d;
376}
377
/*
 * Force-enable the HPET on ATI IXP400 (SB4x0) south bridges when the
 * user requested it and the BIOS left the HPET disabled.
 */
static void ati_force_enable_hpet(struct pci_dev *dev)
{
	u32 d, val;
	u8  b;

	/* Nothing to do if an HPET is already known or already forced. */
	if (hpet_address || force_hpet_address)
		return;

	/* Only act on explicit user request (presumably hpet=force). */
	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	/* Only revisions 0x82 and later are handled. */
	d = ati_ixp4x0_rev(dev);
	if (d  < 0x82)
		return;

	/* base address */
	pci_write_config_dword(dev, 0x14, 0xfed00000);
	pci_read_config_dword(dev, 0x14, &val);

	/*
	 * enable interrupt
	 *
	 * 0xcd6/0xcd7 is an index/data I/O-port pair (index 0x72 selects
	 * the HPET enable byte); set bit 0 and read back to verify it
	 * stuck before committing to the forced address.
	 */
	outb(0x72, 0xcd6); b = inb(0xcd7);
	b |= 0x1;
	outb(0x72, 0xcd6); outb(b, 0xcd7);
	outb(0x72, 0xcd6); b = inb(0xcd7);
	if (!(b & 0x1))
		return;
	/* Set config reg 0x64 bit 10 and verify it sticks as well. */
	pci_read_config_dword(dev, 0x64, &d);
	d |= (1<<10);
	pci_write_config_dword(dev, 0x64, d);
	pci_read_config_dword(dev, 0x64, &d);
	if (!(d & (1<<10)))
		return;

	force_hpet_address = val;
	force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		   force_hpet_address);
	/* Remember the device so ati_force_hpet_resume() can re-enable. */
	cached_dev = dev;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
			 ati_force_enable_hpet);
421
422/*
423 * Undocumented chipset feature taken from LinuxBIOS.
424 */
425static void nvidia_force_hpet_resume(void)
426{
427	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
428	printk(KERN_DEBUG "Force enabled HPET at resume\n");
429}
430
/*
 * Force-enable the HPET on nForce bridges (sequence taken from
 * LinuxBIOS, see the comment above nvidia_force_hpet_resume()).
 */
static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;

	/* Nothing to do if an HPET is already known or already forced. */
	if (hpet_address || force_hpet_address)
		return;

	/* Only act on explicit user request (presumably hpet=force). */
	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	/*
	 * Write base 0xfed00000 with the low enable bit set, read the
	 * register back, and mask off bit 0 to recover the address.
	 */
	pci_write_config_dword(dev, 0x44, 0xfed00001);
	pci_read_config_dword(dev, 0x44, &val);
	force_hpet_address = val & 0xfffffffe;
	force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		force_hpet_address);
	/* Remember the device so nvidia_force_hpet_resume() can re-enable. */
	cached_dev = dev;
}
451
/*
 * nForce bridges whose config reg 0x44 carries the HPET base
 * (device IDs taken from LinuxBIOS, per the comment above).
 */
/* ISA Bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
			nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
			nvidia_force_enable_hpet);
477
478void force_hpet_resume(void)
479{
480	switch (force_hpet_resume_type) {
481	case ICH_FORCE_HPET_RESUME:
482		ich_force_hpet_resume();
483		return;
484	case OLD_ICH_FORCE_HPET_RESUME:
485		old_ich_force_hpet_resume();
486		return;
487	case VT8237_FORCE_HPET_RESUME:
488		vt8237_force_hpet_resume();
489		return;
490	case NVIDIA_FORCE_HPET_RESUME:
491		nvidia_force_hpet_resume();
492		return;
493	case ATI_FORCE_HPET_RESUME:
494		ati_force_hpet_resume();
495		return;
496	default:
497		break;
498	}
499}
500
501/*
502 * According to the datasheet e6xx systems have the HPET hardwired to
503 * 0xfed00000
504 */
505static void e6xx_force_enable_hpet(struct pci_dev *dev)
506{
507	if (hpet_address || force_hpet_address)
508		return;
509
510	force_hpet_address = 0xFED00000;
511	force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
512	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
513		"0x%lx\n", force_hpet_address);
514}
515DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
516			 e6xx_force_enable_hpet);
517
518/*
519 * HPET MSI on some boards (ATI SB700/SB800) has side effect on
520 * floppy DMA. Disable HPET MSI on such platforms.
521 * See erratum #27 (Misinterpreted MSI Requests May Result in
522 * Corrupted LPC DMA Data) in AMD Publication #46837,
523 * "SB700 Family Product Errata", Rev. 1.0, March 2010.
524 */
525static void force_disable_hpet_msi(struct pci_dev *unused)
526{
527	hpet_msi_disable = true;
528}
529
530DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
531			 force_disable_hpet_msi);
532
533#endif
534
535#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
536/* Set correct numa_node information for AMD NB functions */
537static void quirk_amd_nb_node(struct pci_dev *dev)
538{
539	struct pci_dev *nb_ht;
540	unsigned int devfn;
541	u32 node;
542	u32 val;
543
544	devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
545	nb_ht = pci_get_slot(dev->bus, devfn);
546	if (!nb_ht)
547		return;
548
549	pci_read_config_dword(nb_ht, 0x60, &val);
550	node = pcibus_to_node(dev->bus) | (val & 7);
551	/*
552	 * Some hardware may return an invalid node ID,
553	 * so check it first:
554	 */
555	if (node_online(node))
556		set_dev_node(&dev->dev, node);
557	pci_dev_put(nb_ht);
558}
559
/* Northbridge config-space functions: K8, Fam10h and Fam15h (F0-F5). */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
			quirk_amd_nb_node);
590
591#endif
592
593#ifdef CONFIG_PCI
594/*
595 * Processor does not ensure DRAM scrub read/write sequence
596 * is atomic wrt accesses to CC6 save state area. Therefore
597 * if a concurrent scrub read/write access is to same address
598 * the entry may appear as if it is not written. This quirk
599 * applies to Fam16h models 00h-0Fh
600 *
601 * See "Revision Guide" for AMD F16h models 00h-0fh,
602 * document 51810 rev. 3.04, Nov 2013
603 */
604static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
605{
606	u32 val;
607
608	/*
609	 * Suggested workaround:
610	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
611	 */
612	pci_read_config_dword(dev, 0x58, &val);
613	if (val & 0x1F) {
614		val &= ~(0x1F);
615		pci_write_config_dword(dev, 0x58, val);
616	}
617
618	pci_read_config_dword(dev, 0x5C, &val);
619	if (val & BIT(0)) {
620		val &= ~BIT(0);
621		pci_write_config_dword(dev, 0x5c, val);
622	}
623}
624
625DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
626			amd_disable_seq_and_redirect_scrub);
627
 
 
 
 
628/* Ivy Bridge, Haswell, Broadwell */
629static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
630{
631	u32 capid0;
632
633	pci_read_config_dword(pdev, 0x84, &capid0);
634
635	if (capid0 & 0x10)
636		enable_copy_mc_fragile();
637}
638
639/* Skylake */
640static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
641{
642	u32 capid0, capid5;
643
644	pci_read_config_dword(pdev, 0x84, &capid0);
645	pci_read_config_dword(pdev, 0x98, &capid5);
646
647	/*
648	 * CAPID0{7:6} indicate whether this is an advanced RAS SKU
649	 * CAPID5{8:5} indicate that various NVDIMM usage modes are
650	 * enabled, so memory machine check recovery is also enabled.
651	 */
652	if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
653		enable_copy_mc_fragile();
654
655}
656DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
657DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
658DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
659DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
 
660#endif
661
/*
 * True when DMI identifies this as an Apple machine; set by
 * early_platform_quirks() and exported so modules can test it.
 */
bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);
664
665void __init early_platform_quirks(void)
666{
667	x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
668			    dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
669}