v3.1
 
  1/*
  2 * This file contains code to reset and initialize USB host controllers.
  3 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
  4 * It may need to run early during booting -- before USB would normally
  5 * initialize -- to ensure that Linux doesn't use any legacy modes.
  6 *
  7 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
  8 *  (and others)
  9 */
 10
 11#include <linux/types.h>
 12#include <linux/kernel.h>
 13#include <linux/pci.h>
 14#include <linux/init.h>
 15#include <linux/delay.h>
 16#include <linux/acpi.h>
 17#include <linux/dmi.h>
 18#include "pci-quirks.h"
 19#include "xhci-ext-caps.h"
 20
 21
 22#define UHCI_USBLEGSUP		0xc0		/* legacy support */
 23#define UHCI_USBCMD		0		/* command register */
 24#define UHCI_USBINTR		4		/* interrupt register */
 25#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
 26#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
 27#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
 28#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
 29#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
 30#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
 31#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */
 32
 33#define OHCI_CONTROL		0x04
 34#define OHCI_CMDSTATUS		0x08
 35#define OHCI_INTRSTATUS		0x0c
 36#define OHCI_INTRENABLE		0x10
 37#define OHCI_INTRDISABLE	0x14
 38#define OHCI_FMINTERVAL		0x34
 39#define OHCI_HCR		(1 << 0)	/* host controller reset */
 40#define OHCI_OCR		(1 << 3)	/* ownership change request */
 41#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
 42#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
 43#define OHCI_INTR_OC		(1 << 30)	/* ownership change */
 44
 45#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
 46#define EHCI_USBCMD		0		/* command register */
 47#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
 48#define EHCI_USBSTS		4		/* status register */
 49#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
 50#define EHCI_USBINTR		8		/* interrupt register */
 51#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
 52#define EHCI_USBLEGSUP		0		/* legacy support register */
 53#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
 54#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
 55#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
 56#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */
 57
 58/* AMD quirk use */
 59#define	AB_REG_BAR_LOW		0xe0
 60#define	AB_REG_BAR_HIGH		0xe1
 61#define	AB_REG_BAR_SB700	0xf0
 62#define	AB_INDX(addr)		((addr) + 0x00)
 63#define	AB_DATA(addr)		((addr) + 0x04)
 64#define	AX_INDXC		0x30
 65#define	AX_DATAC		0x34
 66
 67#define	NB_PCIE_INDX_ADDR	0xe0
 68#define	NB_PCIE_INDX_DATA	0xe4
 69#define	PCIE_P_CNTL		0x10040
 70#define	BIF_NB			0x10002
 71#define	NB_PIF0_PWRDOWN_0	0x01100012
 72#define	NB_PIF0_PWRDOWN_1	0x01100013
 73
 74#define USB_INTEL_XUSB2PR      0xD0
 75#define USB_INTEL_USB3_PSSEN   0xD8
 76
 77static struct amd_chipset_info {
 78	struct pci_dev	*nb_dev;
 79	struct pci_dev	*smbus_dev;
 80	int nb_type;
 81	int sb_type;
 82	int isoc_reqs;
 83	int probe_count;
 84	int probe_result;
 85} amd_chipset;
 86
 87static DEFINE_SPINLOCK(amd_lock);
 88
 89int usb_amd_find_chipset_info(void)
 90{
 91	u8 rev = 0;
 92	unsigned long flags;
 93	struct amd_chipset_info info;
 94	int ret;
 95
 96	spin_lock_irqsave(&amd_lock, flags);
 97
 98	/* probe only once */
 99	if (amd_chipset.probe_count > 0) {
100		amd_chipset.probe_count++;
101		spin_unlock_irqrestore(&amd_lock, flags);
102		return amd_chipset.probe_result;
103	}
104	memset(&info, 0, sizeof(info));
105	spin_unlock_irqrestore(&amd_lock, flags);
106
107	info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
108	if (info.smbus_dev) {
109		rev = info.smbus_dev->revision;
110		if (rev >= 0x40)
111			info.sb_type = 1;
112		else if (rev >= 0x30 && rev <= 0x3b)
113			info.sb_type = 3;
114	} else {
115		info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
116						0x780b, NULL);
117		if (!info.smbus_dev) {
118			ret = 0;
119			goto commit;
120		}
121
122		rev = info.smbus_dev->revision;
123		if (rev >= 0x11 && rev <= 0x18)
124			info.sb_type = 2;
125	}
126
127	if (info.sb_type == 0) {
128		if (info.smbus_dev) {
129			pci_dev_put(info.smbus_dev);
130			info.smbus_dev = NULL;
131		}
132		ret = 0;
133		goto commit;
134	}
135
136	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
137	if (info.nb_dev) {
138		info.nb_type = 1;
139	} else {
140		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
141		if (info.nb_dev) {
142			info.nb_type = 2;
143		} else {
144			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
145						     0x9600, NULL);
146			if (info.nb_dev)
147				info.nb_type = 3;
148		}
149	}
150
151	ret = info.probe_result = 1;
152	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
153
154commit:
155
156	spin_lock_irqsave(&amd_lock, flags);
157	if (amd_chipset.probe_count > 0) {
158		/* race - someone else was faster - drop devices */
159
160		/* Mark that we were here */
161		amd_chipset.probe_count++;
162		ret = amd_chipset.probe_result;
163
164		spin_unlock_irqrestore(&amd_lock, flags);
165
166		if (info.nb_dev)
167			pci_dev_put(info.nb_dev);
168		if (info.smbus_dev)
169			pci_dev_put(info.smbus_dev);
170
171	} else {
172		/* no race - commit the result */
173		info.probe_count++;
174		amd_chipset = info;
175		spin_unlock_irqrestore(&amd_lock, flags);
176	}
177
178	return ret;
179}
180EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
181
182/*
183 * The hardware normally enables the A-link power management feature, which
184 * lets the system lower the power consumption in idle states.
185 *
186 * This USB quirk prevents the link going into that lower power state
187 * during isochronous transfers.
188 *
189 * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of
190 * some AMD platforms may stutter or have breaks occasionally.
191 */
192static void usb_amd_quirk_pll(int disable)
193{
194	u32 addr, addr_low, addr_high, val;
195	u32 bit = disable ? 0 : 1;
196	unsigned long flags;
197
198	spin_lock_irqsave(&amd_lock, flags);
199
200	if (disable) {
201		amd_chipset.isoc_reqs++;
202		if (amd_chipset.isoc_reqs > 1) {
203			spin_unlock_irqrestore(&amd_lock, flags);
204			return;
205		}
206	} else {
207		amd_chipset.isoc_reqs--;
208		if (amd_chipset.isoc_reqs > 0) {
209			spin_unlock_irqrestore(&amd_lock, flags);
210			return;
211		}
212	}
213
214	if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
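		/* Read the AB indirect-register base one byte at a time
		 * through the southbridge PM index/data port pair at
		 * 0xcd6/0xcd7, then reach the A-link register through the
		 * AB index/data pair at that base.
		 */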
215		outb_p(AB_REG_BAR_LOW, 0xcd6);
216		addr_low = inb_p(0xcd7);
217		outb_p(AB_REG_BAR_HIGH, 0xcd6);
218		addr_high = inb_p(0xcd7);
219		addr = addr_high << 8 | addr_low;
220
221		outl_p(0x30, AB_INDX(addr));
222		outl_p(0x40, AB_DATA(addr));
223		outl_p(0x34, AB_INDX(addr));
224		val = inl_p(AB_DATA(addr));
225	} else if (amd_chipset.sb_type == 3) {
226		pci_read_config_dword(amd_chipset.smbus_dev,
227					AB_REG_BAR_SB700, &addr);
228		outl(AX_INDXC, AB_INDX(addr));
229		outl(0x40, AB_DATA(addr));
230		outl(AX_DATAC, AB_INDX(addr));
231		val = inl(AB_DATA(addr));
232	} else {
233		spin_unlock_irqrestore(&amd_lock, flags);
234		return;
235	}
236
237	if (disable) {
238		val &= ~0x08;
239		val |= (1 << 4) | (1 << 9);
240	} else {
241		val |= 0x08;
242		val &= ~((1 << 4) | (1 << 9));
243	}
244	outl_p(val, AB_DATA(addr));
245
246	if (!amd_chipset.nb_dev) {
247		spin_unlock_irqrestore(&amd_lock, flags);
248		return;
249	}
250
251	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
252		addr = PCIE_P_CNTL;
253		pci_write_config_dword(amd_chipset.nb_dev,
254					NB_PCIE_INDX_ADDR, addr);
255		pci_read_config_dword(amd_chipset.nb_dev,
256					NB_PCIE_INDX_DATA, &val);
257
258		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
259		val |= bit | (bit << 3) | (bit << 12);
260		val |= ((!bit) << 4) | ((!bit) << 9);
261		pci_write_config_dword(amd_chipset.nb_dev,
262					NB_PCIE_INDX_DATA, val);
263
264		addr = BIF_NB;
265		pci_write_config_dword(amd_chipset.nb_dev,
266					NB_PCIE_INDX_ADDR, addr);
267		pci_read_config_dword(amd_chipset.nb_dev,
268					NB_PCIE_INDX_DATA, &val);
269		val &= ~(1 << 8);
270		val |= bit << 8;
271
272		pci_write_config_dword(amd_chipset.nb_dev,
273					NB_PCIE_INDX_DATA, val);
274	} else if (amd_chipset.nb_type == 2) {
275		addr = NB_PIF0_PWRDOWN_0;
276		pci_write_config_dword(amd_chipset.nb_dev,
277					NB_PCIE_INDX_ADDR, addr);
278		pci_read_config_dword(amd_chipset.nb_dev,
279					NB_PCIE_INDX_DATA, &val);
280		if (disable)
281			val &= ~(0x3f << 7);
282		else
283			val |= 0x3f << 7;
284
285		pci_write_config_dword(amd_chipset.nb_dev,
286					NB_PCIE_INDX_DATA, val);
287
288		addr = NB_PIF0_PWRDOWN_1;
289		pci_write_config_dword(amd_chipset.nb_dev,
290					NB_PCIE_INDX_ADDR, addr);
291		pci_read_config_dword(amd_chipset.nb_dev,
292					NB_PCIE_INDX_DATA, &val);
293		if (disable)
294			val &= ~(0x3f << 7);
295		else
296			val |= 0x3f << 7;
297
298		pci_write_config_dword(amd_chipset.nb_dev,
299					NB_PCIE_INDX_DATA, val);
300	}
301
302	spin_unlock_irqrestore(&amd_lock, flags);
303	return;
304}
305
306void usb_amd_quirk_pll_disable(void)
307{
308	usb_amd_quirk_pll(1);
309}
310EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
311
312void usb_amd_quirk_pll_enable(void)
313{
314	usb_amd_quirk_pll(0);
315}
316EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
317
318void usb_amd_dev_put(void)
319{
320	struct pci_dev *nb, *smbus;
321	unsigned long flags;
322
323	spin_lock_irqsave(&amd_lock, flags);
324
325	amd_chipset.probe_count--;
326	if (amd_chipset.probe_count > 0) {
327		spin_unlock_irqrestore(&amd_lock, flags);
328		return;
329	}
330
331	/* save them to pci_dev_put outside of spinlock */
332	nb    = amd_chipset.nb_dev;
333	smbus = amd_chipset.smbus_dev;
334
335	amd_chipset.nb_dev = NULL;
336	amd_chipset.smbus_dev = NULL;
337	amd_chipset.nb_type = 0;
338	amd_chipset.sb_type = 0;
339	amd_chipset.isoc_reqs = 0;
340	amd_chipset.probe_result = 0;
341
342	spin_unlock_irqrestore(&amd_lock, flags);
343
344	if (nb)
345		pci_dev_put(nb);
346	if (smbus)
347		pci_dev_put(smbus);
348}
349EXPORT_SYMBOL_GPL(usb_amd_dev_put);
350
351/*
352 * Make sure the controller is completely inactive, unable to
353 * generate interrupts or do DMA.
354 */
355void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
356{
357	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
358	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
359	 */
360	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
361
362	/* Reset the HC - this will force us to get a
363	 * new notification of any already connected
364	 * ports due to the virtual disconnect that it
365	 * implies.
366	 */
367	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
368	mb();
369	udelay(5);
370	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
371		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
372
373	/* Just to be safe, disable interrupt requests and
374	 * make sure the controller is stopped.
375	 */
376	outw(0, base + UHCI_USBINTR);
377	outw(0, base + UHCI_USBCMD);
378}
379EXPORT_SYMBOL_GPL(uhci_reset_hc);
380
381/*
382 * Initialize a controller that was newly discovered or has just been
383 * resumed.  In either case we can't be sure of its previous state.
384 *
385 * Returns: 1 if the controller was reset, 0 otherwise.
386 */
387int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
388{
389	u16 legsup;
390	unsigned int cmd, intr;
391
392	/*
393	 * When restarting a suspended controller, we expect all the
394	 * settings to be the same as we left them:
395	 *
396	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
397	 *	Controller is stopped and configured with EGSM set;
398	 *	No interrupts enabled except possibly Resume Detect.
399	 *
400	 * If any of these conditions are violated we do a complete reset.
401	 */
402	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
403	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
404		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
405				__func__, legsup);
406		goto reset_needed;
407	}
408
409	cmd = inw(base + UHCI_USBCMD);
410	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
411			!(cmd & UHCI_USBCMD_EGSM)) {
412		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
413				__func__, cmd);
414		goto reset_needed;
415	}
416
417	intr = inw(base + UHCI_USBINTR);
418	if (intr & (~UHCI_USBINTR_RESUME)) {
419		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
420				__func__, intr);
421		goto reset_needed;
422	}
423	return 0;
424
425reset_needed:
426	dev_dbg(&pdev->dev, "Performing full reset\n");
427	uhci_reset_hc(pdev, base);
428	return 1;
429}
430EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
431
432static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
433{
434	u16 cmd;
435	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
436}
437
438#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
439#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
440
441static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
442{
443	unsigned long base = 0;
444	int i;
445
446	if (!pio_enabled(pdev))
447		return;
448
449	for (i = 0; i < PCI_ROM_RESOURCE; i++)
450		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
451			base = pci_resource_start(pdev, i);
452			break;
453		}
454
455	if (base)
456		uhci_check_and_reset_hc(pdev, base);
457}
458
459static int __devinit mmio_resource_enabled(struct pci_dev *pdev, int idx)
460{
461	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
462}
463
464static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
465{
466	void __iomem *base;
467	u32 control;
468
469	if (!mmio_resource_enabled(pdev, 0))
470		return;
471
472	base = pci_ioremap_bar(pdev, 0);
473	if (base == NULL)
474		return;
475
476	control = readl(base + OHCI_CONTROL);
477
478/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
479#ifdef __hppa__
480#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
481#else
482#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC
483
484	if (control & OHCI_CTRL_IR) {
485		int wait_time = 500; /* arbitrary; 5 seconds */
486		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
487		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
488		while (wait_time > 0 &&
489				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
490			wait_time -= 10;
491			msleep(10);
492		}
493		if (wait_time <= 0)
494			dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
495					" (BIOS bug?) %08x\n",
496					readl(base + OHCI_CONTROL));
497	}
498#endif
499
500	/* reset controller, preserving RWC (and possibly IR) */
501	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
502	readl(base + OHCI_CONTROL);
503
504	/* Some NVIDIA controllers stop working if kept in RESET for too long */
505	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
506		u32 fminterval;
507		int cnt;
508
509		/* drive reset for at least 50 ms (7.1.7.5) */
510		msleep(50);
511
512		/* software reset of the controller, preserving HcFmInterval */
513		fminterval = readl(base + OHCI_FMINTERVAL);
514		writel(OHCI_HCR, base + OHCI_CMDSTATUS);
515
516		/* reset requires max 10 us delay */
517		for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
518			if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
519				break;
520			udelay(1);
521		}
522		writel(fminterval, base + OHCI_FMINTERVAL);
523
524		/* Now we're in the SUSPEND state with all devices reset
525		 * and wakeups and interrupts disabled
526		 */
527	}
528
529	/*
530	 * disable interrupts
531	 */
532	writel(~(u32)0, base + OHCI_INTRDISABLE);
533	writel(~(u32)0, base + OHCI_INTRSTATUS);
534
535	iounmap(base);
536}
537
538static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
539	{
540		/*  Pegatron Lucid (ExoPC) */
541		.matches = {
542			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
543			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
544		},
545	},
546	{
547		/*  Pegatron Lucid (Ordissimo AIRIS) */
548		.matches = {
549			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
550			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"),
551		},
552	},
553	{ }
554};
555
556static void __devinit ehci_bios_handoff(struct pci_dev *pdev,
557					void __iomem *op_reg_base,
558					u32 cap, u8 offset)
559{
560	int try_handoff = 1, tried_handoff = 0;
561
562	/* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
563	 * the handoff on its unused controller.  Skip it. */
564	if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
565		if (dmi_check_system(ehci_dmi_nohandoff_table))
566			try_handoff = 0;
567	}
568
569	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
570		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
571
572#if 0
573/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
574 * but that seems dubious in general (the BIOS left it off intentionally)
575 * and is known to prevent some systems from booting.  so we won't do this
576 * unless maybe we can determine when we're on a system that needs SMI forced.
577 */
578		/* BIOS workaround (?): be sure the pre-Linux code
579		 * receives the SMI
580		 */
581		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
582		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
583				       val | EHCI_USBLEGCTLSTS_SOOE);
584#endif
585
586		/* some systems get upset if this semaphore is
587		 * set for any other reason than forcing a BIOS
588		 * handoff..
589		 */
590		pci_write_config_byte(pdev, offset + 3, 1);
591	}
592
593	/* if boot firmware now owns EHCI, spin till it hands it over. */
594	if (try_handoff) {
595		int msec = 1000;
596		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
597			tried_handoff = 1;
598			msleep(10);
599			msec -= 10;
600			pci_read_config_dword(pdev, offset, &cap);
601		}
602	}
603
604	if (cap & EHCI_USBLEGSUP_BIOS) {
605		/* well, possibly buggy BIOS... try to shut it down,
606		 * and hope nothing goes too wrong
607		 */
608		if (try_handoff)
609			dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
610				 " (BIOS bug?) %08x\n", cap);
611		pci_write_config_byte(pdev, offset + 2, 0);
612	}
613
614	/* just in case, always disable EHCI SMIs */
615	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
616
617	/* If the BIOS ever owned the controller then we can't expect
618	 * any power sessions to remain intact.
619	 */
620	if (tried_handoff)
621		writel(0, op_reg_base + EHCI_CONFIGFLAG);
622}
623
624static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
625{
626	void __iomem *base, *op_reg_base;
627	u32	hcc_params, cap, val;
628	u8	offset, cap_length;
629	int	wait_time, delta, count = 256/4;
630
631	if (!mmio_resource_enabled(pdev, 0))
632		return;
633
634	base = pci_ioremap_bar(pdev, 0);
635	if (base == NULL)
636		return;
637
638	cap_length = readb(base);
639	op_reg_base = base + cap_length;
640
641	/* EHCI 0.96 and later may have "extended capabilities"
642	 * spec section 5.1 explains the bios handoff, e.g. for
643	 * booting from USB disk or using a usb keyboard
644	 */
645	hcc_params = readl(base + EHCI_HCC_PARAMS);
646	offset = (hcc_params >> 8) & 0xff;
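	/* HCCPARAMS bits 15:8 (EECP) hold the PCI config-space offset of
	 * the first extended capability; bits 15:8 of each capability
	 * register point to the next one, so walk the chain.
	 */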
647	while (offset && --count) {
648		pci_read_config_dword(pdev, offset, &cap);
649
650		switch (cap & 0xff) {
651		case 1:
652			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
653			break;
654		case 0: /* Illegal reserved cap, set cap=0 so we exit */
655			cap = 0; /* then fallthrough... */
656		default:
657			dev_warn(&pdev->dev, "EHCI: unrecognized capability "
658				 "%02x\n", cap & 0xff);
659		}
660		offset = (cap >> 8) & 0xff;
661	}
662	if (!count)
663		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
664
665	/*
666	 * halt EHCI & disable its interrupts in any case
667	 */
668	val = readl(op_reg_base + EHCI_USBSTS);
669	if ((val & EHCI_USBSTS_HALTED) == 0) {
670		val = readl(op_reg_base + EHCI_USBCMD);
671		val &= ~EHCI_USBCMD_RUN;
672		writel(val, op_reg_base + EHCI_USBCMD);
673
674		wait_time = 2000;
675		delta = 100;
676		do {
677			writel(0x3f, op_reg_base + EHCI_USBSTS);
678			udelay(delta);
679			wait_time -= delta;
680			val = readl(op_reg_base + EHCI_USBSTS);
681			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
682				break;
683			}
684		} while (wait_time > 0);
685	}
686	writel(0, op_reg_base + EHCI_USBINTR);
687	writel(0x3f, op_reg_base + EHCI_USBSTS);
688
689	iounmap(base);
690}
691
692/*
693 * handshake - spin reading a register until handshake completes
694 * @ptr: address of hc register to be read
695 * @mask: bits to look at in result of read
696 * @done: value of those bits when handshake succeeds
697 * @wait_usec: timeout in microseconds
698 * @delay_usec: delay in microseconds to wait between polling
699 *
700 * Polls a register every delay_usec microseconds.
701 * Returns 0 when the mask bits have the value done.
702 * Returns -ETIMEDOUT if this condition is not true after
703 * wait_usec microseconds have passed.
704 */
705static int handshake(void __iomem *ptr, u32 mask, u32 done,
706		int wait_usec, int delay_usec)
707{
708	u32	result;
709
710	do {
711		result = readl(ptr);
712		result &= mask;
713		if (result == done)
714			return 0;
715		udelay(delay_usec);
716		wait_usec -= delay_usec;
717	} while (wait_usec > 0);
718	return -ETIMEDOUT;
719}
720
721bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
722{
723	return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
724		pdev->vendor == PCI_VENDOR_ID_INTEL &&
725		pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
726}
727EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
728
729/*
730 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
731 * share some number of ports.  These ports can be switched between either
732 * controller.  Not all of the ports under the EHCI host controller may be
733 * switchable.
734 *
735 * The ports should be switched over to xHCI before PCI probes for any device
736 * start.  This avoids active devices under EHCI being disconnected during the
737 * port switchover, which could cause loss of data on USB storage devices, or
738 * failed boot when the root file system is on a USB mass storage device and is
739 * enumerated under EHCI first.
740 *
741 * We write into the xHC's PCI configuration space in some Intel-specific
742 * registers to switch the ports over.  The USB 3.0 terminations and the USB
743 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
744 * terminations before switching the USB 2.0 wires over, so that USB 3.0
745 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
746 */
747void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
748{
749	u32		ports_available;
750
751	ports_available = 0xffffffff;
752	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
753	 * Register, to turn on SuperSpeed terminations for all
754	 * available ports.
755	 */
756	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
757			cpu_to_le32(ports_available));
758
759	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
760			&ports_available);
761	dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
762			"under xHCI: 0x%x\n", ports_available);
763
764	ports_available = 0xffffffff;
765	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
766	 * switch the USB 2.0 power and data lines over to the xHCI
767	 * host.
768	 */
769	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
770			cpu_to_le32(ports_available));
771
772	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
773			&ports_available);
774	dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
775			"to xHCI: 0x%x\n", ports_available);
776}
777EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
778
779/**
780 * PCI Quirks for xHCI.
781 *
782 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
783 * It signals to the BIOS that the OS wants control of the host controller,
784 * and then waits 5 seconds for the BIOS to hand over control.
785 * If we timeout, assume the BIOS is broken and take control anyway.
786 */
787static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
788{
789	void __iomem *base;
790	int ext_cap_offset;
791	void __iomem *op_reg_base;
792	u32 val;
793	int timeout;
794
795	if (!mmio_resource_enabled(pdev, 0))
796		return;
797
798	base = ioremap_nocache(pci_resource_start(pdev, 0),
799				pci_resource_len(pdev, 0));
800	if (base == NULL)
801		return;
802
803	/*
804	 * Find the Legacy Support Capability register -
805	 * this is optional for xHCI host controllers.
806	 */
807	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
808	do {
809		if (!ext_cap_offset)
810			/* We've reached the end of the extended capabilities */
811			goto hc_init;
812		val = readl(base + ext_cap_offset);
813		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
814			break;
815		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
816	} while (1);
817
818	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
819	if (val & XHCI_HC_BIOS_OWNED) {
820		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
821
822		/* Wait for 5 seconds with 10 microsecond polling interval */
823		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
824				0, 5000, 10);
825
826		/* Assume a buggy BIOS and take HC ownership anyway */
827		if (timeout) {
828			dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
829					" (BIOS bug ?) %08x\n", val);
830			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
831		}
832	}
833
834	/* Disable any BIOS SMIs */
835	writel(XHCI_LEGACY_DISABLE_SMI,
836			base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
837
838	if (usb_is_intel_switchable_xhci(pdev))
839		usb_enable_xhci_ports(pdev);
840hc_init:
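	/* CAPLENGTH (low byte of the first capability register) gives the
	 * offset of the operational registers from the MMIO base.
	 */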
841	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
842
843	/* Wait for the host controller to be ready before writing any
844	 * operational or runtime registers.  Wait 5 seconds and no more.
845	 */
846	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
847			5000, 10);
848	/* Assume a buggy HC and start HC initialization anyway */
849	if (timeout) {
850		val = readl(op_reg_base + XHCI_STS_OFFSET);
851		dev_warn(&pdev->dev,
852				"xHCI HW not ready after 5 sec (HC bug?) "
853				"status = 0x%x\n", val);
854	}
855
856	/* Send the halt and disable interrupts command */
857	val = readl(op_reg_base + XHCI_CMD_OFFSET);
858	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
859	writel(val, op_reg_base + XHCI_CMD_OFFSET);
860
861	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
862	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
863			XHCI_MAX_HALT_USEC, 125);
864	if (timeout) {
865		val = readl(op_reg_base + XHCI_STS_OFFSET);
866		dev_warn(&pdev->dev,
867				"xHCI HW did not halt within %d usec "
868				"status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
869	}
870
871	iounmap(base);
872}
873
874static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
875{
876	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
877		quirk_usb_handoff_uhci(pdev);
878	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
879		quirk_usb_handoff_ohci(pdev);
880	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
881		quirk_usb_disable_ehci(pdev);
882	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
883		quirk_usb_handoff_xhci(pdev);
884}
885DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
 
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * This file contains code to reset and initialize USB host controllers.
   4 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
   5 * It may need to run early during booting -- before USB would normally
   6 * initialize -- to ensure that Linux doesn't use any legacy modes.
   7 *
   8 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
   9 *  (and others)
  10 */
  11
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/pci.h>
  15#include <linux/delay.h>
  16#include <linux/export.h>
  17#include <linux/acpi.h>
  18#include <linux/dmi.h>
  19#include "pci-quirks.h"
  20#include "xhci-ext-caps.h"
  21
  22
  23#define UHCI_USBLEGSUP		0xc0		/* legacy support */
  24#define UHCI_USBCMD		0		/* command register */
  25#define UHCI_USBINTR		4		/* interrupt register */
  26#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
  27#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
  28#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
  29#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
  30#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
  31#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
  32#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */
  33
  34#define OHCI_CONTROL		0x04
  35#define OHCI_CMDSTATUS		0x08
  36#define OHCI_INTRSTATUS		0x0c
  37#define OHCI_INTRENABLE		0x10
  38#define OHCI_INTRDISABLE	0x14
  39#define OHCI_FMINTERVAL		0x34
  40#define OHCI_HCFS		(3 << 6)	/* hc functional state */
  41#define OHCI_HCR		(1 << 0)	/* host controller reset */
  42#define OHCI_OCR		(1 << 3)	/* ownership change request */
  43#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
  44#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
  45#define OHCI_INTR_OC		(1 << 30)	/* ownership change */
  46
  47#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
  48#define EHCI_USBCMD		0		/* command register */
  49#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
  50#define EHCI_USBSTS		4		/* status register */
  51#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
  52#define EHCI_USBINTR		8		/* interrupt register */
  53#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
  54#define EHCI_USBLEGSUP		0		/* legacy support register */
  55#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
  56#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
  57#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
  58#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */
  59
  60/* AMD quirk use */
  61#define	AB_REG_BAR_LOW		0xe0
  62#define	AB_REG_BAR_HIGH		0xe1
  63#define	AB_REG_BAR_SB700	0xf0
  64#define	AB_INDX(addr)		((addr) + 0x00)
  65#define	AB_DATA(addr)		((addr) + 0x04)
  66#define	AX_INDXC		0x30
  67#define	AX_DATAC		0x34
  68
  69#define PT_ADDR_INDX		0xE8
  70#define PT_READ_INDX		0xE4
  71#define PT_SIG_1_ADDR		0xA520
  72#define PT_SIG_2_ADDR		0xA521
  73#define PT_SIG_3_ADDR		0xA522
  74#define PT_SIG_4_ADDR		0xA523
  75#define PT_SIG_1_DATA		0x78
  76#define PT_SIG_2_DATA		0x56
  77#define PT_SIG_3_DATA		0x34
  78#define PT_SIG_4_DATA		0x12
  79#define PT4_P1_REG		0xB521
  80#define PT4_P2_REG		0xB522
  81#define PT2_P1_REG		0xD520
  82#define PT2_P2_REG		0xD521
  83#define PT1_P1_REG		0xD522
  84#define PT1_P2_REG		0xD523
  85
  86#define	NB_PCIE_INDX_ADDR	0xe0
  87#define	NB_PCIE_INDX_DATA	0xe4
  88#define	PCIE_P_CNTL		0x10040
  89#define	BIF_NB			0x10002
  90#define	NB_PIF0_PWRDOWN_0	0x01100012
  91#define	NB_PIF0_PWRDOWN_1	0x01100013
  92
  93#define USB_INTEL_XUSB2PR      0xD0
  94#define USB_INTEL_USB2PRM      0xD4
  95#define USB_INTEL_USB3_PSSEN   0xD8
  96#define USB_INTEL_USB3PRM      0xDC
  97
  98/* ASMEDIA quirk use */
  99#define ASMT_DATA_WRITE0_REG	0xF8
 100#define ASMT_DATA_WRITE1_REG	0xFC
 101#define ASMT_CONTROL_REG	0xE0
 102#define ASMT_CONTROL_WRITE_BIT	0x02
 103#define ASMT_WRITEREG_CMD	0x10423
 104#define ASMT_FLOWCTL_ADDR	0xFA30
 105#define ASMT_FLOWCTL_DATA	0xBA
 106#define ASMT_PSEUDO_DATA	0
 107
 108/*
 109 * amd_chipset_gen values represent AMD different chipset generations
 110 */
 111enum amd_chipset_gen {
 112	NOT_AMD_CHIPSET = 0,
 113	AMD_CHIPSET_SB600,
 114	AMD_CHIPSET_SB700,
 115	AMD_CHIPSET_SB800,
 116	AMD_CHIPSET_HUDSON2,
 117	AMD_CHIPSET_BOLTON,
 118	AMD_CHIPSET_YANGTZE,
 119	AMD_CHIPSET_TAISHAN,
 120	AMD_CHIPSET_UNKNOWN,
 121};
 122
 123struct amd_chipset_type {
 124	enum amd_chipset_gen gen;
 125	u8 rev;
 126};
 127
 128static struct amd_chipset_info {
 129	struct pci_dev	*nb_dev;
 130	struct pci_dev	*smbus_dev;
 131	int nb_type;
 132	struct amd_chipset_type sb_type;
 133	int isoc_reqs;
 134	int probe_count;
 135	bool need_pll_quirk;
 136} amd_chipset;
 137
 138static DEFINE_SPINLOCK(amd_lock);
 139
 140/*
 141 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
 142 *
 143 * AMD FCH/SB generation and revision is identified by SMBus controller
 144 * vendor, device and revision IDs.
 145 *
 146 * Returns: 1 if it is an AMD chipset, 0 otherwise.
 147 */
 148static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
 149{
 150	u8 rev = 0;
 151	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
 152
 153	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
 154			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
 155	if (pinfo->smbus_dev) {
 156		rev = pinfo->smbus_dev->revision;
 157		if (rev >= 0x10 && rev <= 0x1f)
 158			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
 159		else if (rev >= 0x30 && rev <= 0x3f)
 160			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
 161		else if (rev >= 0x40 && rev <= 0x4f)
 162			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
 163	} else {
 164		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 165				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
 166
 167		if (pinfo->smbus_dev) {
 168			rev = pinfo->smbus_dev->revision;
 169			if (rev >= 0x11 && rev <= 0x14)
 170				pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
 171			else if (rev >= 0x15 && rev <= 0x18)
 172				pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
 173			else if (rev >= 0x39 && rev <= 0x3a)
 174				pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
 175		} else {
 176			pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 177							  0x145c, NULL);
 178			if (pinfo->smbus_dev) {
 179				rev = pinfo->smbus_dev->revision;
 180				pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
 181			} else {
 182				pinfo->sb_type.gen = NOT_AMD_CHIPSET;
 183				return 0;
 184			}
 185		}
 186	}
 187	pinfo->sb_type.rev = rev;
 188	return 1;
 189}
 190
 191void sb800_prefetch(struct device *dev, int on)
 192{
 193	u16 misc;
 194	struct pci_dev *pdev = to_pci_dev(dev);
 195
 196	pci_read_config_word(pdev, 0x50, &misc);
 197	if (on == 0)
 198		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
 199	else
 200		pci_write_config_word(pdev, 0x50, misc | 0x0300);
 201}
 202EXPORT_SYMBOL_GPL(sb800_prefetch);
 203
 204static void usb_amd_find_chipset_info(void)
 205{
 206	unsigned long flags;
 207	struct amd_chipset_info info;
 208	info.need_pll_quirk = 0;
 209
 210	spin_lock_irqsave(&amd_lock, flags);
 211
 212	/* probe only once */
 213	if (amd_chipset.probe_count > 0) {
 214		amd_chipset.probe_count++;
 215		spin_unlock_irqrestore(&amd_lock, flags);
 216		return;
 217	}
 218	memset(&info, 0, sizeof(info));
 219	spin_unlock_irqrestore(&amd_lock, flags);
 220
 221	if (!amd_chipset_sb_type_init(&info)) {
 222		goto commit;
 223	}
 224
 225	switch (info.sb_type.gen) {
 226	case AMD_CHIPSET_SB700:
 227		info.need_pll_quirk = info.sb_type.rev <= 0x3B;
 228		break;
 229	case AMD_CHIPSET_SB800:
 230	case AMD_CHIPSET_HUDSON2:
 231	case AMD_CHIPSET_BOLTON:
 232		info.need_pll_quirk = 1;
 233		break;
 234	default:
 235		info.need_pll_quirk = 0;
 236		break;
 237	}
 238
 239	if (!info.need_pll_quirk) {
 240		if (info.smbus_dev) {
 241			pci_dev_put(info.smbus_dev);
 242			info.smbus_dev = NULL;
 243		}
 244		goto commit;
 245	}
 246
 247	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
 248	if (info.nb_dev) {
 249		info.nb_type = 1;
 250	} else {
 251		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
 252		if (info.nb_dev) {
 253			info.nb_type = 2;
 254		} else {
 255			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 256						     0x9600, NULL);
 257			if (info.nb_dev)
 258				info.nb_type = 3;
 259		}
 260	}
 261
 262	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 263
 264commit:
 265
 266	spin_lock_irqsave(&amd_lock, flags);
 267	if (amd_chipset.probe_count > 0) {
 268		/* race - someone else was faster - drop devices */
 269
  270		/* Mark that we were here */
 271		amd_chipset.probe_count++;
 272
 273		spin_unlock_irqrestore(&amd_lock, flags);
 274
 275		pci_dev_put(info.nb_dev);
 276		pci_dev_put(info.smbus_dev);
 277
 278	} else {
 279		/* no race - commit the result */
 280		info.probe_count++;
 281		amd_chipset = info;
 282		spin_unlock_irqrestore(&amd_lock, flags);
 283	}
 284}
 285
 286int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
 287{
 288	/* Make sure amd chipset type has already been initialized */
 289	usb_amd_find_chipset_info();
 290	if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
 291	    amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
 292		dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
 293		return 1;
 294	}
 295	return 0;
 296}
 297EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
 298
 299bool usb_amd_hang_symptom_quirk(void)
 300{
 301	u8 rev;
 302
 303	usb_amd_find_chipset_info();
 304	rev = amd_chipset.sb_type.rev;
 305	/* SB600 and old version of SB700 have hang symptom bug */
 306	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
 307			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 308			 rev >= 0x3a && rev <= 0x3b);
 309}
 310EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
 311
 312bool usb_amd_prefetch_quirk(void)
 313{
 314	usb_amd_find_chipset_info();
 315	/* SB800 needs pre-fetch fix */
 316	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
 317}
 318EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
 319
 320bool usb_amd_quirk_pll_check(void)
 321{
 322	usb_amd_find_chipset_info();
 323	return amd_chipset.need_pll_quirk;
 324}
 325EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check);
 326
 327/*
 328 * The hardware normally enables the A-link power management feature, which
 329 * lets the system lower the power consumption in idle states.
 330 *
 331 * This USB quirk prevents the link going into that lower power state
 332 * during isochronous transfers.
 333 *
 334 * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of
 335 * some AMD platforms may stutter or have breaks occasionally.
 336 */
 337static void usb_amd_quirk_pll(int disable)
 338{
 339	u32 addr, addr_low, addr_high, val;
 340	u32 bit = disable ? 0 : 1;
 341	unsigned long flags;
 342
 343	spin_lock_irqsave(&amd_lock, flags);
 344
 345	if (disable) {
 346		amd_chipset.isoc_reqs++;
 347		if (amd_chipset.isoc_reqs > 1) {
 348			spin_unlock_irqrestore(&amd_lock, flags);
 349			return;
 350		}
 351	} else {
 352		amd_chipset.isoc_reqs--;
 353		if (amd_chipset.isoc_reqs > 0) {
 354			spin_unlock_irqrestore(&amd_lock, flags);
 355			return;
 356		}
 357	}
 358
 359	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
 360			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
 361			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
 362		outb_p(AB_REG_BAR_LOW, 0xcd6);
 363		addr_low = inb_p(0xcd7);
 364		outb_p(AB_REG_BAR_HIGH, 0xcd6);
 365		addr_high = inb_p(0xcd7);
 366		addr = addr_high << 8 | addr_low;
 367
 368		outl_p(0x30, AB_INDX(addr));
 369		outl_p(0x40, AB_DATA(addr));
 370		outl_p(0x34, AB_INDX(addr));
 371		val = inl_p(AB_DATA(addr));
 372	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 373			amd_chipset.sb_type.rev <= 0x3b) {
 374		pci_read_config_dword(amd_chipset.smbus_dev,
 375					AB_REG_BAR_SB700, &addr);
 376		outl(AX_INDXC, AB_INDX(addr));
 377		outl(0x40, AB_DATA(addr));
 378		outl(AX_DATAC, AB_INDX(addr));
 379		val = inl(AB_DATA(addr));
 380	} else {
 381		spin_unlock_irqrestore(&amd_lock, flags);
 382		return;
 383	}
 384
 385	if (disable) {
 386		val &= ~0x08;
 387		val |= (1 << 4) | (1 << 9);
 388	} else {
 389		val |= 0x08;
 390		val &= ~((1 << 4) | (1 << 9));
 391	}
 392	outl_p(val, AB_DATA(addr));
 393
 394	if (!amd_chipset.nb_dev) {
 395		spin_unlock_irqrestore(&amd_lock, flags);
 396		return;
 397	}
 398
 399	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
 400		addr = PCIE_P_CNTL;
 401		pci_write_config_dword(amd_chipset.nb_dev,
 402					NB_PCIE_INDX_ADDR, addr);
 403		pci_read_config_dword(amd_chipset.nb_dev,
 404					NB_PCIE_INDX_DATA, &val);
 405
 406		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
 407		val |= bit | (bit << 3) | (bit << 12);
 408		val |= ((!bit) << 4) | ((!bit) << 9);
 409		pci_write_config_dword(amd_chipset.nb_dev,
 410					NB_PCIE_INDX_DATA, val);
 411
 412		addr = BIF_NB;
 413		pci_write_config_dword(amd_chipset.nb_dev,
 414					NB_PCIE_INDX_ADDR, addr);
 415		pci_read_config_dword(amd_chipset.nb_dev,
 416					NB_PCIE_INDX_DATA, &val);
 417		val &= ~(1 << 8);
 418		val |= bit << 8;
 419
 420		pci_write_config_dword(amd_chipset.nb_dev,
 421					NB_PCIE_INDX_DATA, val);
 422	} else if (amd_chipset.nb_type == 2) {
 423		addr = NB_PIF0_PWRDOWN_0;
 424		pci_write_config_dword(amd_chipset.nb_dev,
 425					NB_PCIE_INDX_ADDR, addr);
 426		pci_read_config_dword(amd_chipset.nb_dev,
 427					NB_PCIE_INDX_DATA, &val);
 428		if (disable)
 429			val &= ~(0x3f << 7);
 430		else
 431			val |= 0x3f << 7;
 432
 433		pci_write_config_dword(amd_chipset.nb_dev,
 434					NB_PCIE_INDX_DATA, val);
 435
 436		addr = NB_PIF0_PWRDOWN_1;
 437		pci_write_config_dword(amd_chipset.nb_dev,
 438					NB_PCIE_INDX_ADDR, addr);
 439		pci_read_config_dword(amd_chipset.nb_dev,
 440					NB_PCIE_INDX_DATA, &val);
 441		if (disable)
 442			val &= ~(0x3f << 7);
 443		else
 444			val |= 0x3f << 7;
 445
 446		pci_write_config_dword(amd_chipset.nb_dev,
 447					NB_PCIE_INDX_DATA, val);
 448	}
 449
 450	spin_unlock_irqrestore(&amd_lock, flags);
 451	return;
 452}
 453
 454void usb_amd_quirk_pll_disable(void)
 455{
 456	usb_amd_quirk_pll(1);
 457}
 458EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
 459
 460static int usb_asmedia_wait_write(struct pci_dev *pdev)
 461{
 462	unsigned long retry_count;
 463	unsigned char value;
 464
 465	for (retry_count = 1000; retry_count > 0; --retry_count) {
 466
 467		pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
 468
 469		if (value == 0xff) {
 470			dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
 471			return -EIO;
 472		}
 473
 474		if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
 475			return 0;
 476
 477		udelay(50);
 478	}
 479
 480	dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
 481	return -ETIMEDOUT;
 482}
 483
 484void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
 485{
 486	if (usb_asmedia_wait_write(pdev) != 0)
 487		return;
 488
 489	/* send command and address to device */
 490	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
 491	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
 492	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
 493
 494	if (usb_asmedia_wait_write(pdev) != 0)
 495		return;
 496
 497	/* send data to device */
 498	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
 499	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
 500	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
 501}
 502EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
 503
 504void usb_amd_quirk_pll_enable(void)
 505{
 506	usb_amd_quirk_pll(0);
 507}
 508EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
 509
 510void usb_amd_dev_put(void)
 511{
 512	struct pci_dev *nb, *smbus;
 513	unsigned long flags;
 514
 515	spin_lock_irqsave(&amd_lock, flags);
 516
 517	amd_chipset.probe_count--;
 518	if (amd_chipset.probe_count > 0) {
 519		spin_unlock_irqrestore(&amd_lock, flags);
 520		return;
 521	}
 522
 523	/* save them to pci_dev_put outside of spinlock */
 524	nb    = amd_chipset.nb_dev;
 525	smbus = amd_chipset.smbus_dev;
 526
 527	amd_chipset.nb_dev = NULL;
 528	amd_chipset.smbus_dev = NULL;
 529	amd_chipset.nb_type = 0;
 530	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
 531	amd_chipset.isoc_reqs = 0;
 532	amd_chipset.need_pll_quirk = 0;
 533
 534	spin_unlock_irqrestore(&amd_lock, flags);
 535
 536	pci_dev_put(nb);
 537	pci_dev_put(smbus);
 538}
 539EXPORT_SYMBOL_GPL(usb_amd_dev_put);
 540
 541/*
 542 * Check if port is disabled in BIOS on AMD Promontory host.
 543 * BIOS Disabled ports may wake on connect/disconnect and need
 544 * driver workaround to keep them disabled.
 545 * Returns true if port is marked disabled.
 546 */
 547bool usb_amd_pt_check_port(struct device *device, int port)
 548{
 549	unsigned char value, port_shift;
 550	struct pci_dev *pdev;
 551	u16 reg;
 552
 553	pdev = to_pci_dev(device);
 554	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);
 555
 556	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 557	if (value != PT_SIG_1_DATA)
 558		return false;
 559
 560	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);
 561
 562	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 563	if (value != PT_SIG_2_DATA)
 564		return false;
 565
 566	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);
 567
 568	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 569	if (value != PT_SIG_3_DATA)
 570		return false;
 571
 572	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);
 573
 574	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 575	if (value != PT_SIG_4_DATA)
 576		return false;
 577
 578	/* Check disabled port setting, if bit is set port is enabled */
 579	switch (pdev->device) {
 580	case 0x43b9:
 581	case 0x43ba:
 582	/*
 583	 * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
 584	 * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0
 585	 * PT4_P2_REG bits[6..0] represents ports 13 to 7
 586	 */
 587		if (port > 6) {
 588			reg = PT4_P2_REG;
 589			port_shift = port - 7;
 590		} else {
 591			reg = PT4_P1_REG;
 592			port_shift = port + 1;
 593		}
 594		break;
 595	case 0x43bb:
 596	/*
 597	 * device is AMD_PROMONTORYA_2(0x43bb)
 598	 * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0
 599	 * PT2_P2_REG bits[5..0] represents ports 9 to 3
 600	 */
 601		if (port > 2) {
 602			reg = PT2_P2_REG;
 603			port_shift = port - 3;
 604		} else {
 605			reg = PT2_P1_REG;
 606			port_shift = port + 5;
 607		}
 608		break;
 609	case 0x43bc:
 610	/*
 611	 * device is AMD_PROMONTORYA_1(0x43bc)
 612	 * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0
 613	 * PT1_P2_REG[5..0] represents ports 9 to 4
 614	 */
 615		if (port > 3) {
 616			reg = PT1_P2_REG;
 617			port_shift = port - 4;
 618		} else {
 619			reg = PT1_P1_REG;
 620			port_shift = port + 4;
 621		}
 622		break;
 623	default:
 624		return false;
 625	}
 626	pci_write_config_word(pdev, PT_ADDR_INDX, reg);
 627	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 628
 629	return !(value & BIT(port_shift));
 630}
 631EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
 632
 633/*
 634 * Make sure the controller is completely inactive, unable to
 635 * generate interrupts or do DMA.
 636 */
 637void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
 638{
 639	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
 640	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
 641	 */
 642	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
 643
 644	/* Reset the HC - this will force us to get a
 645	 * new notification of any already connected
 646	 * ports due to the virtual disconnect that it
 647	 * implies.
 648	 */
 649	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
 650	mb();
 651	udelay(5);
 652	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
 653		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
 654
 655	/* Just to be safe, disable interrupt requests and
 656	 * make sure the controller is stopped.
 657	 */
 658	outw(0, base + UHCI_USBINTR);
 659	outw(0, base + UHCI_USBCMD);
 660}
 661EXPORT_SYMBOL_GPL(uhci_reset_hc);
 662
 663/*
 664 * Initialize a controller that was newly discovered or has just been
 665 * resumed.  In either case we can't be sure of its previous state.
 666 *
 667 * Returns: 1 if the controller was reset, 0 otherwise.
 668 */
 669int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
 670{
 671	u16 legsup;
 672	unsigned int cmd, intr;
 673
 674	/*
 675	 * When restarting a suspended controller, we expect all the
 676	 * settings to be the same as we left them:
 677	 *
 678	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
 679	 *	Controller is stopped and configured with EGSM set;
 680	 *	No interrupts enabled except possibly Resume Detect.
 681	 *
 682	 * If any of these conditions are violated we do a complete reset.
 683	 */
 684	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
 685	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
 686		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
 687				__func__, legsup);
 688		goto reset_needed;
 689	}
 690
 691	cmd = inw(base + UHCI_USBCMD);
 692	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
 693			!(cmd & UHCI_USBCMD_EGSM)) {
 694		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
 695				__func__, cmd);
 696		goto reset_needed;
 697	}
 698
 699	intr = inw(base + UHCI_USBINTR);
 700	if (intr & (~UHCI_USBINTR_RESUME)) {
 701		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
 702				__func__, intr);
 703		goto reset_needed;
 704	}
 705	return 0;
 706
 707reset_needed:
 708	dev_dbg(&pdev->dev, "Performing full reset\n");
 709	uhci_reset_hc(pdev, base);
 710	return 1;
 711}
 712EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
 713
 714static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
 715{
 716	u16 cmd;
 717	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
 718}
 719
 720#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
 721#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
 722
 723static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
 724{
 725	unsigned long base = 0;
 726	int i;
 727
 728	if (!pio_enabled(pdev))
 729		return;
 730
 731	for (i = 0; i < PCI_ROM_RESOURCE; i++)
 732		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
 733			base = pci_resource_start(pdev, i);
 734			break;
 735		}
 736
 737	if (base)
 738		uhci_check_and_reset_hc(pdev, base);
 739}
 740
 741static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
 742{
 743	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
 744}
 745
 746static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
 747{
 748	void __iomem *base;
 749	u32 control;
 750	u32 fminterval = 0;
 751	bool no_fminterval = false;
 752	int cnt;
 753
 754	if (!mmio_resource_enabled(pdev, 0))
 755		return;
 756
 757	base = pci_ioremap_bar(pdev, 0);
 758	if (base == NULL)
 759		return;
 760
 761	/*
 762	 * ULi M5237 OHCI controller locks the whole system when accessing
 763	 * the OHCI_FMINTERVAL offset.
 764	 */
 765	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
 766		no_fminterval = true;
 767
 768	control = readl(base + OHCI_CONTROL);
 769
 770/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
 771#ifdef __hppa__
 772#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
 773#else
 774#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC
 775
 776	if (control & OHCI_CTRL_IR) {
 777		int wait_time = 500; /* arbitrary; 5 seconds */
 778		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
 779		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
 780		while (wait_time > 0 &&
 781				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
 782			wait_time -= 10;
 783			msleep(10);
 784		}
 785		if (wait_time <= 0)
 786			dev_warn(&pdev->dev,
 787				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 788				 readl(base + OHCI_CONTROL));
 789	}
 790#endif
 791
 792	/* disable interrupts */
 793	writel((u32) ~0, base + OHCI_INTRDISABLE);
 794
 795	/* Go into the USB_RESET state, preserving RWC (and possibly IR) */
 796	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
 797	readl(base + OHCI_CONTROL);
 798
 799	/* software reset of the controller, preserving HcFmInterval */
 800	if (!no_fminterval)
 801		fminterval = readl(base + OHCI_FMINTERVAL);
 802
 803	writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 804
 805	/* reset requires max 10 us delay */
 806	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
 807		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
 808			break;
 809		udelay(1);
 810	}
 811
 812	if (!no_fminterval)
 813		writel(fminterval, base + OHCI_FMINTERVAL);
 814
 815	/* Now the controller is safely in SUSPEND and nothing can wake it up */
 816	iounmap(base);
 817}
 818
 819static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
 820	{
 821		/*  Pegatron Lucid (ExoPC) */
 822		.matches = {
 823			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
 824			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
 825		},
 826	},
 827	{
 828		/*  Pegatron Lucid (Ordissimo AIRIS) */
 829		.matches = {
 830			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
 831			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 832		},
 833	},
 834	{
 835		/*  Pegatron Lucid (Ordissimo) */
 836		.matches = {
 837			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
 838			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 839		},
 840	},
 841	{
 842		/* HASEE E200 */
 843		.matches = {
 844			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
 845			DMI_MATCH(DMI_BOARD_NAME, "E210"),
 846			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
 847		},
 848	},
 849	{ }
 850};
 851
 852static void ehci_bios_handoff(struct pci_dev *pdev,
 853					void __iomem *op_reg_base,
 854					u32 cap, u8 offset)
 855{
 856	int try_handoff = 1, tried_handoff = 0;
 857
 858	/*
 859	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
 860	 * the handoff on its unused controller.  Skip it.
 861	 *
 862	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
 863	 */
 864	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
 865			pdev->device == 0x27cc)) {
 866		if (dmi_check_system(ehci_dmi_nohandoff_table))
 867			try_handoff = 0;
 868	}
 869
 870	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
 871		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
 872
 873#if 0
 874/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 875 * but that seems dubious in general (the BIOS left it off intentionally)
 876 * and is known to prevent some systems from booting.  so we won't do this
 877 * unless maybe we can determine when we're on a system that needs SMI forced.
 878 */
 879		/* BIOS workaround (?): be sure the pre-Linux code
 880		 * receives the SMI
 881		 */
 882		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
 883		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
 884				       val | EHCI_USBLEGCTLSTS_SOOE);
 885#endif
 886
 887		/* some systems get upset if this semaphore is
 888		 * set for any other reason than forcing a BIOS
 889		 * handoff..
 890		 */
 891		pci_write_config_byte(pdev, offset + 3, 1);
 892	}
 893
 894	/* if boot firmware now owns EHCI, spin till it hands it over. */
 895	if (try_handoff) {
 896		int msec = 1000;
 897		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
 898			tried_handoff = 1;
 899			msleep(10);
 900			msec -= 10;
 901			pci_read_config_dword(pdev, offset, &cap);
 902		}
 903	}
 904
 905	if (cap & EHCI_USBLEGSUP_BIOS) {
 906		/* well, possibly buggy BIOS... try to shut it down,
 907		 * and hope nothing goes too wrong
 908		 */
 909		if (try_handoff)
 910			dev_warn(&pdev->dev,
 911				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 912				 cap);
 913		pci_write_config_byte(pdev, offset + 2, 0);
 914	}
 915
 916	/* just in case, always disable EHCI SMIs */
 917	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
 918
 919	/* If the BIOS ever owned the controller then we can't expect
 920	 * any power sessions to remain intact.
 921	 */
 922	if (tried_handoff)
 923		writel(0, op_reg_base + EHCI_CONFIGFLAG);
 924}
 925
 926static void quirk_usb_disable_ehci(struct pci_dev *pdev)
 927{
 928	void __iomem *base, *op_reg_base;
 929	u32	hcc_params, cap, val;
 930	u8	offset, cap_length;
 931	int	wait_time, count = 256/4;
 932
 933	if (!mmio_resource_enabled(pdev, 0))
 934		return;
 935
 936	base = pci_ioremap_bar(pdev, 0);
 937	if (base == NULL)
 938		return;
 939
 940	cap_length = readb(base);
 941	op_reg_base = base + cap_length;
 942
 943	/* EHCI 0.96 and later may have "extended capabilities";
 944	 * spec section 5.1 explains the BIOS handoff, e.g. for
 945	 * booting from a USB disk or using a USB keyboard.
 946	 */
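    	/* HCCPARAMS bits 15:8 (EECP) hold the PCI config offset of the
    	 * first extended capability.  Each capability dword carries its ID
    	 * in bits 7:0 (1 == Legacy Support) and the offset of the next
    	 * capability in bits 15:8; a next-offset of 0 terminates the list.
    	 */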
 947	hcc_params = readl(base + EHCI_HCC_PARAMS);
 948	offset = (hcc_params >> 8) & 0xff;
 949	while (offset && --count) {
 950		pci_read_config_dword(pdev, offset, &cap);
 951
 952		switch (cap & 0xff) {
 953		case 1:
 954			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
 955			break;
 956		case 0: /* Illegal reserved cap, set cap=0 so we exit */
 957			cap = 0; /* fall through */
 958		default:
 959			dev_warn(&pdev->dev,
 960				 "EHCI: unrecognized capability %02x\n",
 961				 cap & 0xff);
 962		}
 963		offset = (cap >> 8) & 0xff;
 964	}
 965	if (!count)
 966		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
 967
 968	/*
 969	 * halt EHCI & disable its interrupts in any case
 970	 */
 971	val = readl(op_reg_base + EHCI_USBSTS);
 972	if ((val & EHCI_USBSTS_HALTED) == 0) {
 973		val = readl(op_reg_base + EHCI_USBCMD);
 974		val &= ~EHCI_USBCMD_RUN;
 975		writel(val, op_reg_base + EHCI_USBCMD);
 976
 977		wait_time = 2000;
 978		do {
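    			/* 0x3f covers the six RW1C status bits in USBSTS
    			 * (USBINT, USBERRINT, PCD, FLR, HSE, IAA); writing
    			 * them back as 1 clears any that are pending.
    			 */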
 979			writel(0x3f, op_reg_base + EHCI_USBSTS);
 980			udelay(100);
 981			wait_time -= 100;
 982			val = readl(op_reg_base + EHCI_USBSTS);
 983			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
 984				break;
 985			}
 986		} while (wait_time > 0);
 987	}
 988	writel(0, op_reg_base + EHCI_USBINTR);
 989	writel(0x3f, op_reg_base + EHCI_USBSTS);
 990
 991	iounmap(base);
 992}
 993
 994/*
 995 * handshake - spin reading a register until handshake completes
 996 * @ptr: address of hc register to be read
 997 * @mask: bits to look at in result of read
 998 * @done: value of those bits when handshake succeeds
 999 * @wait_usec: timeout in microseconds
1000 * @delay_usec: delay in microseconds to wait between polling
1001 *
1002 * Polls a register every delay_usec microseconds.
1003 * Returns 0 when the mask bits have the value done.
1004 * Returns -ETIMEDOUT if this condition is not true after
1005 * wait_usec microseconds have passed.
1006 */
1007static int handshake(void __iomem *ptr, u32 mask, u32 done,
1008		int wait_usec, int delay_usec)
1009{
1010	u32	result;
1011
1012	do {
1013		result = readl(ptr);
1014		result &= mask;
1015		if (result == done)
1016			return 0;
1017		udelay(delay_usec);
1018		wait_usec -= delay_usec;
1019	} while (wait_usec > 0);
1020	return -ETIMEDOUT;
1021}
1022
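    /*
     * A minimal usage sketch (illustrative only, not a call site in this
     * file): wait up to 1 ms, polling every 10 us, for an xHCI controller
     * to report that it has halted.  Note that @done is compared against
     * the masked readback, so pass the expected masked value rather than
     * a boolean:
     *
     *	if (handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT,
     *			XHCI_STS_HALT, 1000, 10))
     *		dev_warn(&pdev->dev, "HC did not halt\n");
     */
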
1023/*
1024 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
1025 * share some number of ports.  These ports can be switched between either
1026 * controller.  Not all of the ports under the EHCI host controller may be
1027 * switchable.
1028 *
1029 * The ports should be switched over to xHCI before PCI probes for any device
1030 * start.  This avoids active devices under EHCI being disconnected during the
1031 * port switchover, which could cause loss of data on USB storage devices, or
1032 * failed boot when the root file system is on a USB mass storage device and is
1033 * enumerated under EHCI first.
1034 *
1035 * We write into the xHC's PCI configuration space in some Intel-specific
1036 * registers to switch the ports over.  The USB 3.0 terminations and the USB
1037 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
1038 * terminations before switching the USB 2.0 wires over, so that USB 3.0
1039 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
1040 */
1041void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
1042{
1043	u32		ports_available;
1044	bool		ehci_found = false;
1045	struct pci_dev	*companion = NULL;
1046
1047	/* The Sony VAIO T-series with subsystem device ID 90a8 is not capable
1048	 * of switching ports from EHCI to xHCI.
1049	 */
1050	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
1051	    xhci_pdev->subsystem_device == 0x90a8)
1052		return;
1053
1054	/* make sure an intel EHCI controller exists */
1055	for_each_pci_dev(companion) {
1056		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
1057		    companion->vendor == PCI_VENDOR_ID_INTEL) {
1058			ehci_found = true;
1059			break;
1060		}
1061	}
1062
1063	if (!ehci_found)
1064		return;
1065
1066	/* Don't switch the ports over if the user hasn't compiled the xHCI
1067	 * driver.  Otherwise they will see "dead" USB ports that don't power
1068	 * the devices.
1069	 */
1070	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
1071		dev_warn(&xhci_pdev->dev,
1072			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
1073		dev_warn(&xhci_pdev->dev,
1074				"USB 3.0 devices will work at USB 2.0 speeds.\n");
1075		usb_disable_xhci_ports(xhci_pdev);
1076		return;
1077	}
1078
1079	/* Read USB3PRM, the USB 3.0 Port Routing Mask Register.
1080	 * It indicates which ports can be switched over by the OS.
1081	 */
1082	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
1083			&ports_available);
1084
1085	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
1086			ports_available);
1087
1088	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
1089	 * Register, to turn on SuperSpeed terminations for the
1090	 * switchable ports.
1091	 */
1092	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
1093			ports_available);
1094
1095	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
1096			&ports_available);
1097	dev_dbg(&xhci_pdev->dev,
1098		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
1099		ports_available);
1100
1101	/* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register.
1102	 * It indicates which USB 2.0 ports can be handed to the xHCI host.
1103	 */
1104
1105	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
1106			&ports_available);
1107
1108	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
1109			ports_available);
1110
1111	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
1112	 * switch the USB 2.0 power and data lines over to the xHCI
1113	 * host.
1114	 */
1115	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
1116			ports_available);
1117
1118	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
1119			&ports_available);
1120	dev_dbg(&xhci_pdev->dev,
1121		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
1122		ports_available);
1123}
1124EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
1125
1126void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
1127{
1128	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
1129	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
1130}
1131EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
1132
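    /*
     * A hedged usage sketch: both exported helpers take the xHCI
     * controller's pci_dev.  A driver that later needs to redo or undo the
     * switchover (for example after resume, when firmware may have routed
     * the shared ports back to EHCI) could do something like:
     *
     *	if (xhci_usable)				// hypothetical condition
     *		usb_enable_intel_xhci_ports(xhci_pdev);	// USB3 terminations first, then USB2 wires
     *	else
     *		usb_disable_xhci_ports(xhci_pdev);	// route the shared ports back to EHCI
     *
     * The enable path keeps the ordering described above: SuperSpeed
     * terminations (USB3_PSSEN) are turned on before the USB 2.0 wires
     * (XUSB2PR) are moved over.
     */
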
1133/*
1134 * PCI quirk for the xHCI BIOS/OS handoff.
1135 *
1136 * Takes care of the handoff between the pre-OS firmware (i.e. BIOS) and the OS.
1137 * It signals to the BIOS that the OS wants control of the host controller,
1138 * and then waits up to 1 second for the BIOS to hand over control.
1139 * If we time out, assume the BIOS is broken and take control anyway.
1140 */
1141static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
1142{
1143	void __iomem *base;
1144	int ext_cap_offset;
1145	void __iomem *op_reg_base;
1146	u32 val;
1147	int timeout;
1148	int len = pci_resource_len(pdev, 0);
1149
1150	if (!mmio_resource_enabled(pdev, 0))
1151		return;
1152
1153	base = ioremap_nocache(pci_resource_start(pdev, 0), len);
1154	if (base == NULL)
1155		return;
1156
1157	/*
1158	 * Find the Legacy Support Capability register -
1159	 * this is optional for xHCI host controllers.
1160	 */
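    	/* The xHCI USB Legacy Support capability mirrors the EHCI layout:
    	 * bit 16 of its first dword is the BIOS-owned semaphore and bit 24
    	 * the OS-owned semaphore (XHCI_HC_BIOS_OWNED / XHCI_HC_OS_OWNED).
    	 */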
1161	ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);
1162
1163	if (!ext_cap_offset)
1164		goto hc_init;
1165
1166	if ((ext_cap_offset + sizeof(val)) > len) {
1167		/* We're reading garbage from the controller */
1168		dev_warn(&pdev->dev, "xHCI controller failing to respond\n");
1169		goto iounmap;
1170	}
1171	val = readl(base + ext_cap_offset);
1172
1173	/* Auto handoff never worked for these devices. Force it and continue */
1174	if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
1175			(pdev->vendor == PCI_VENDOR_ID_RENESAS
1176			 && pdev->device == 0x0014)) {
1177		val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
1178		writel(val, base + ext_cap_offset);
1179	}
1180
1181	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
1182	if (val & XHCI_HC_BIOS_OWNED) {
1183		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
1184
1185		/* Wait for 1 second with 10 microsecond polling interval */
1186		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
1187				0, 1000000, 10);
1188
1189		/* Assume a buggy BIOS and take HC ownership anyway */
1190		if (timeout) {
1191			dev_warn(&pdev->dev,
1192				 "xHCI BIOS handoff failed (BIOS bug?) %08x\n",
1193				 val);
1194			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
1195		}
1196	}
1197
1198	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1199	/* Mask off (turn off) any enabled SMIs */
1200	val &= XHCI_LEGACY_DISABLE_SMI;
1201	/* Set all SMI event bits (RW1C) so any pending events are cleared */
1202	val |= XHCI_LEGACY_SMI_EVENTS;
1203	/* Disable any BIOS SMIs and clear all SMI events */
1204	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1205
1206hc_init:
1207	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
1208		usb_enable_intel_xhci_ports(pdev);
1209
1210	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
1211
1212	/* Wait for the host controller to be ready before writing any
1213	 * operational or runtime registers.  Wait 5 seconds and no more.
1214	 */
1215	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
1216			5000000, 10);
1217	/* Assume a buggy HC and start HC initialization anyway */
1218	if (timeout) {
1219		val = readl(op_reg_base + XHCI_STS_OFFSET);
1220		dev_warn(&pdev->dev,
1221			 "xHCI HW not ready after 5 sec (HC bug?), status = 0x%x\n",
1222			 val);
1223	}
1224
1225	/* Send the halt and disable interrupts command */
1226	val = readl(op_reg_base + XHCI_CMD_OFFSET);
1227	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
1228	writel(val, op_reg_base + XHCI_CMD_OFFSET);
1229
1230	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
1231	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
1232			XHCI_MAX_HALT_USEC, 125);
1233	if (timeout) {
1234		val = readl(op_reg_base + XHCI_STS_OFFSET);
1235		dev_warn(&pdev->dev,
1236			 "xHCI HW did not halt within %d usec, status = 0x%x\n",
1237			 XHCI_MAX_HALT_USEC, val);
1238	}
1239
1240iounmap:
1241	iounmap(base);
1242}
1243
1244static void quirk_usb_early_handoff(struct pci_dev *pdev)
1245{
1246	/* Skip the Netlogic MIPS SoC's internal PCI USB controller.
1247	 * This device does not need/support EHCI/OHCI handoff.
1248	 */
1249	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
1250		return;
1251	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
1252			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
1253			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
1254			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
1255		return;
1256
1257	if (pci_enable_device(pdev) < 0) {
1258		dev_warn(&pdev->dev,
1259			 "Can't enable PCI device, BIOS handoff failed.\n");
1260		return;
1261	}
1262	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
1263		quirk_usb_handoff_uhci(pdev);
1264	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
1265		quirk_usb_handoff_ohci(pdev);
1266	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
1267		quirk_usb_disable_ehci(pdev);
1268	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
1269		quirk_usb_handoff_xhci(pdev);
1270	pci_disable_device(pdev);
1271}
1272DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1273			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
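
    /*
     * The (PCI_CLASS_SERIAL_USB, 8) pair above matches on the upper 16 bits
     * of the class code, i.e. any programming interface of base class 0x0c,
     * subclass 0x03, so UHCI, OHCI, EHCI and xHCI controllers all pass
     * through quirk_usb_early_handoff() before their drivers bind.
     */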