v3.1
 
  1/*
  2 * This file contains code to reset and initialize USB host controllers.
  3 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
  4 * It may need to run early during booting -- before USB would normally
  5 * initialize -- to ensure that Linux doesn't use any legacy modes.
  6 *
  7 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
  8 *  (and others)
  9 */
 10
 11#include <linux/types.h>
 12#include <linux/kernel.h>
 13#include <linux/pci.h>
 14#include <linux/init.h>
 15#include <linux/delay.h>
 16#include <linux/acpi.h>
 17#include <linux/dmi.h>
 18#include "pci-quirks.h"
 19#include "xhci-ext-caps.h"
 20
 21
 22#define UHCI_USBLEGSUP		0xc0		/* legacy support */
 23#define UHCI_USBCMD		0		/* command register */
 24#define UHCI_USBINTR		4		/* interrupt register */
 25#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
 26#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
 27#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
 28#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
 29#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
 30#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
 31#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */
 32
 33#define OHCI_CONTROL		0x04
 34#define OHCI_CMDSTATUS		0x08
 35#define OHCI_INTRSTATUS		0x0c
 36#define OHCI_INTRENABLE		0x10
 37#define OHCI_INTRDISABLE	0x14
 38#define OHCI_FMINTERVAL		0x34
 39#define OHCI_HCR		(1 << 0)	/* host controller reset */
 40#define OHCI_OCR		(1 << 3)	/* ownership change request */
 41#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
 42#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
 43#define OHCI_INTR_OC		(1 << 30)	/* ownership change */
 44
 45#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
 46#define EHCI_USBCMD		0		/* command register */
 47#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
 48#define EHCI_USBSTS		4		/* status register */
 49#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
 50#define EHCI_USBINTR		8		/* interrupt register */
 51#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
 52#define EHCI_USBLEGSUP		0		/* legacy support register */
 53#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
 54#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
 55#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
 56#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */
 57
 58/* AMD quirk use */
 59#define	AB_REG_BAR_LOW		0xe0
 60#define	AB_REG_BAR_HIGH		0xe1
 61#define	AB_REG_BAR_SB700	0xf0
 62#define	AB_INDX(addr)		((addr) + 0x00)
 63#define	AB_DATA(addr)		((addr) + 0x04)
 64#define	AX_INDXC		0x30
 65#define	AX_DATAC		0x34
 66
 67#define	NB_PCIE_INDX_ADDR	0xe0
 68#define	NB_PCIE_INDX_DATA	0xe4
 69#define	PCIE_P_CNTL		0x10040
 70#define	BIF_NB			0x10002
 71#define	NB_PIF0_PWRDOWN_0	0x01100012
 72#define	NB_PIF0_PWRDOWN_1	0x01100013
 73
 74#define USB_INTEL_XUSB2PR      0xD0
 75#define USB_INTEL_USB3_PSSEN   0xD8
 76
 77static struct amd_chipset_info {
 78	struct pci_dev	*nb_dev;
 79	struct pci_dev	*smbus_dev;
 80	int nb_type;
 81	int sb_type;
 82	int isoc_reqs;
 83	int probe_count;
 84	int probe_result;
 85} amd_chipset;
 86
 87static DEFINE_SPINLOCK(amd_lock);
 88
 89int usb_amd_find_chipset_info(void)
 90{
 91	u8 rev = 0;
 92	unsigned long flags;
 93	struct amd_chipset_info info;
 94	int ret;
 95
 96	spin_lock_irqsave(&amd_lock, flags);
 97
 98	/* probe only once */
 99	if (amd_chipset.probe_count > 0) {
100		amd_chipset.probe_count++;
101		spin_unlock_irqrestore(&amd_lock, flags);
102		return amd_chipset.probe_result;
103	}
104	memset(&info, 0, sizeof(info));
105	spin_unlock_irqrestore(&amd_lock, flags);
106
107	info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
108	if (info.smbus_dev) {
109		rev = info.smbus_dev->revision;
110		if (rev >= 0x40)
111			info.sb_type = 1;
112		else if (rev >= 0x30 && rev <= 0x3b)
113			info.sb_type = 3;
114	} else {
115		info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
116						0x780b, NULL);
117		if (!info.smbus_dev) {
118			ret = 0;
119			goto commit;
120		}
121
122		rev = info.smbus_dev->revision;
123		if (rev >= 0x11 && rev <= 0x18)
124			info.sb_type = 2;
125	}
126
127	if (info.sb_type == 0) {
128		if (info.smbus_dev) {
129			pci_dev_put(info.smbus_dev);
130			info.smbus_dev = NULL;
131		}
132		ret = 0;
133		goto commit;
134	}
135
136	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
137	if (info.nb_dev) {
138		info.nb_type = 1;
139	} else {
140		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
141		if (info.nb_dev) {
142			info.nb_type = 2;
143		} else {
144			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
145						     0x9600, NULL);
146			if (info.nb_dev)
147				info.nb_type = 3;
148		}
149	}
150
151	ret = info.probe_result = 1;
152	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
153
154commit:
155
156	spin_lock_irqsave(&amd_lock, flags);
157	if (amd_chipset.probe_count > 0) {
158		/* race - someone else was faster - drop devices */
159
 160		/* Mark that we were here */
161		amd_chipset.probe_count++;
162		ret = amd_chipset.probe_result;
163
164		spin_unlock_irqrestore(&amd_lock, flags);
165
166		if (info.nb_dev)
167			pci_dev_put(info.nb_dev);
168		if (info.smbus_dev)
169			pci_dev_put(info.smbus_dev);
170
171	} else {
172		/* no race - commit the result */
173		info.probe_count++;
174		amd_chipset = info;
175		spin_unlock_irqrestore(&amd_lock, flags);
176	}
177
178	return ret;
179}
180EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
181
182/*
183 * The hardware normally enables the A-link power management feature, which
184 * lets the system lower the power consumption in idle states.
185 *
186 * This USB quirk prevents the link going into that lower power state
187 * during isochronous transfers.
188 *
189 * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of
190 * some AMD platforms may stutter or have breaks occasionally.
191 */
192static void usb_amd_quirk_pll(int disable)
193{
194	u32 addr, addr_low, addr_high, val;
195	u32 bit = disable ? 0 : 1;
196	unsigned long flags;
197
198	spin_lock_irqsave(&amd_lock, flags);
199
200	if (disable) {
201		amd_chipset.isoc_reqs++;
202		if (amd_chipset.isoc_reqs > 1) {
203			spin_unlock_irqrestore(&amd_lock, flags);
204			return;
205		}
206	} else {
207		amd_chipset.isoc_reqs--;
208		if (amd_chipset.isoc_reqs > 0) {
209			spin_unlock_irqrestore(&amd_lock, flags);
210			return;
211		}
212	}
213
214	if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
215		outb_p(AB_REG_BAR_LOW, 0xcd6);
216		addr_low = inb_p(0xcd7);
217		outb_p(AB_REG_BAR_HIGH, 0xcd6);
218		addr_high = inb_p(0xcd7);
219		addr = addr_high << 8 | addr_low;
220
221		outl_p(0x30, AB_INDX(addr));
222		outl_p(0x40, AB_DATA(addr));
223		outl_p(0x34, AB_INDX(addr));
224		val = inl_p(AB_DATA(addr));
225	} else if (amd_chipset.sb_type == 3) {
226		pci_read_config_dword(amd_chipset.smbus_dev,
227					AB_REG_BAR_SB700, &addr);
228		outl(AX_INDXC, AB_INDX(addr));
229		outl(0x40, AB_DATA(addr));
230		outl(AX_DATAC, AB_INDX(addr));
231		val = inl(AB_DATA(addr));
232	} else {
233		spin_unlock_irqrestore(&amd_lock, flags);
234		return;
235	}
236
237	if (disable) {
238		val &= ~0x08;
239		val |= (1 << 4) | (1 << 9);
240	} else {
241		val |= 0x08;
242		val &= ~((1 << 4) | (1 << 9));
243	}
244	outl_p(val, AB_DATA(addr));
245
246	if (!amd_chipset.nb_dev) {
247		spin_unlock_irqrestore(&amd_lock, flags);
248		return;
249	}
250
251	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
252		addr = PCIE_P_CNTL;
253		pci_write_config_dword(amd_chipset.nb_dev,
254					NB_PCIE_INDX_ADDR, addr);
255		pci_read_config_dword(amd_chipset.nb_dev,
256					NB_PCIE_INDX_DATA, &val);
257
258		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
259		val |= bit | (bit << 3) | (bit << 12);
260		val |= ((!bit) << 4) | ((!bit) << 9);
261		pci_write_config_dword(amd_chipset.nb_dev,
262					NB_PCIE_INDX_DATA, val);
263
264		addr = BIF_NB;
265		pci_write_config_dword(amd_chipset.nb_dev,
266					NB_PCIE_INDX_ADDR, addr);
267		pci_read_config_dword(amd_chipset.nb_dev,
268					NB_PCIE_INDX_DATA, &val);
269		val &= ~(1 << 8);
270		val |= bit << 8;
271
272		pci_write_config_dword(amd_chipset.nb_dev,
273					NB_PCIE_INDX_DATA, val);
274	} else if (amd_chipset.nb_type == 2) {
275		addr = NB_PIF0_PWRDOWN_0;
276		pci_write_config_dword(amd_chipset.nb_dev,
277					NB_PCIE_INDX_ADDR, addr);
278		pci_read_config_dword(amd_chipset.nb_dev,
279					NB_PCIE_INDX_DATA, &val);
280		if (disable)
281			val &= ~(0x3f << 7);
282		else
283			val |= 0x3f << 7;
284
285		pci_write_config_dword(amd_chipset.nb_dev,
286					NB_PCIE_INDX_DATA, val);
287
288		addr = NB_PIF0_PWRDOWN_1;
289		pci_write_config_dword(amd_chipset.nb_dev,
290					NB_PCIE_INDX_ADDR, addr);
291		pci_read_config_dword(amd_chipset.nb_dev,
292					NB_PCIE_INDX_DATA, &val);
293		if (disable)
294			val &= ~(0x3f << 7);
295		else
296			val |= 0x3f << 7;
297
298		pci_write_config_dword(amd_chipset.nb_dev,
299					NB_PCIE_INDX_DATA, val);
300	}
301
302	spin_unlock_irqrestore(&amd_lock, flags);
303	return;
304}
305
306void usb_amd_quirk_pll_disable(void)
307{
308	usb_amd_quirk_pll(1);
309}
310EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
311
312void usb_amd_quirk_pll_enable(void)
313{
314	usb_amd_quirk_pll(0);
315}
316EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
317
318void usb_amd_dev_put(void)
319{
320	struct pci_dev *nb, *smbus;
321	unsigned long flags;
322
323	spin_lock_irqsave(&amd_lock, flags);
324
325	amd_chipset.probe_count--;
326	if (amd_chipset.probe_count > 0) {
327		spin_unlock_irqrestore(&amd_lock, flags);
328		return;
329	}
330
331	/* save them to pci_dev_put outside of spinlock */
332	nb    = amd_chipset.nb_dev;
333	smbus = amd_chipset.smbus_dev;
334
335	amd_chipset.nb_dev = NULL;
336	amd_chipset.smbus_dev = NULL;
337	amd_chipset.nb_type = 0;
338	amd_chipset.sb_type = 0;
339	amd_chipset.isoc_reqs = 0;
340	amd_chipset.probe_result = 0;
341
342	spin_unlock_irqrestore(&amd_lock, flags);
343
344	if (nb)
345		pci_dev_put(nb);
346	if (smbus)
347		pci_dev_put(smbus);
348}
349EXPORT_SYMBOL_GPL(usb_amd_dev_put);
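
A minimal caller sketch for the API above (not taken from pci-quirks.c; the amd_pll_quirk flag and the example_* helpers are hypothetical names): a host controller driver probes the chipset once, brackets its isochronous streaming with the disable/enable pair, and drops the cached device references when it shuts down.

static int amd_pll_quirk;

static void example_hcd_probe(void)
{
	/* returns 1 when an affected AMD chipset was found */
	amd_pll_quirk = usb_amd_find_chipset_info();
}

static void example_first_isoc_urb_queued(void)
{
	if (amd_pll_quirk)
		usb_amd_quirk_pll_disable();	/* hold the A-link out of low power */
}

static void example_last_isoc_urb_completed(void)
{
	if (amd_pll_quirk)
		usb_amd_quirk_pll_enable();
}

static void example_hcd_remove(void)
{
	if (amd_pll_quirk)
		usb_amd_dev_put();	/* release nb_dev/smbus_dev references */
}
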
350
351/*
352 * Make sure the controller is completely inactive, unable to
353 * generate interrupts or do DMA.
354 */
355void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
356{
357	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
358	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
359	 */
360	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
361
362	/* Reset the HC - this will force us to get a
363	 * new notification of any already connected
364	 * ports due to the virtual disconnect that it
365	 * implies.
366	 */
367	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
368	mb();
369	udelay(5);
370	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
371		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
372
373	/* Just to be safe, disable interrupt requests and
374	 * make sure the controller is stopped.
375	 */
376	outw(0, base + UHCI_USBINTR);
377	outw(0, base + UHCI_USBCMD);
378}
379EXPORT_SYMBOL_GPL(uhci_reset_hc);
380
381/*
382 * Initialize a controller that was newly discovered or has just been
383 * resumed.  In either case we can't be sure of its previous state.
384 *
385 * Returns: 1 if the controller was reset, 0 otherwise.
386 */
387int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
388{
389	u16 legsup;
390	unsigned int cmd, intr;
391
392	/*
393	 * When restarting a suspended controller, we expect all the
394	 * settings to be the same as we left them:
395	 *
396	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
397	 *	Controller is stopped and configured with EGSM set;
398	 *	No interrupts enabled except possibly Resume Detect.
399	 *
400	 * If any of these conditions are violated we do a complete reset.
401	 */
402	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
403	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
404		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
405				__func__, legsup);
406		goto reset_needed;
407	}
408
409	cmd = inw(base + UHCI_USBCMD);
410	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
411			!(cmd & UHCI_USBCMD_EGSM)) {
412		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
413				__func__, cmd);
414		goto reset_needed;
415	}
416
417	intr = inw(base + UHCI_USBINTR);
418	if (intr & (~UHCI_USBINTR_RESUME)) {
419		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
420				__func__, intr);
421		goto reset_needed;
422	}
423	return 0;
424
425reset_needed:
426	dev_dbg(&pdev->dev, "Performing full reset\n");
427	uhci_reset_hc(pdev, base);
428	return 1;
429}
430EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
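
A short usage sketch (the example_* name is hypothetical): a UHCI PCI driver calls this on resume and uses the return value to tell whether the suspended state survived or the controller had to be reset and must be reconfigured from scratch.

static void example_uhci_pci_resume(struct pci_dev *pdev, unsigned long io_base)
{
	/* returns 1 if a full reset was performed, 0 if state was preserved */
	if (uhci_check_and_reset_hc(pdev, io_base))
		dev_info(&pdev->dev, "controller state lost, reinitializing\n");
}
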
431
432static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
433{
434	u16 cmd;
435	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
436}
437
438#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
439#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
440
441static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
442{
443	unsigned long base = 0;
444	int i;
445
446	if (!pio_enabled(pdev))
447		return;
448
449	for (i = 0; i < PCI_ROM_RESOURCE; i++)
450		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
451			base = pci_resource_start(pdev, i);
452			break;
453		}
454
455	if (base)
456		uhci_check_and_reset_hc(pdev, base);
457}
458
459static int __devinit mmio_resource_enabled(struct pci_dev *pdev, int idx)
460{
461	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
462}
463
464static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
465{
466	void __iomem *base;
467	u32 control;
468
469	if (!mmio_resource_enabled(pdev, 0))
470		return;
471
472	base = pci_ioremap_bar(pdev, 0);
473	if (base == NULL)
474		return;
475
476	control = readl(base + OHCI_CONTROL);
477
478/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
479#ifdef __hppa__
480#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
481#else
482#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC
483
484	if (control & OHCI_CTRL_IR) {
485		int wait_time = 500; /* arbitrary; 5 seconds */
486		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
487		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
488		while (wait_time > 0 &&
489				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
490			wait_time -= 10;
491			msleep(10);
492		}
493		if (wait_time <= 0)
494			dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
495					" (BIOS bug?) %08x\n",
496					readl(base + OHCI_CONTROL));
497	}
498#endif
499
500	/* reset controller, preserving RWC (and possibly IR) */
501	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
502	readl(base + OHCI_CONTROL);
503
504	/* Some NVIDIA controllers stop working if kept in RESET for too long */
505	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
506		u32 fminterval;
507		int cnt;
508
509		/* drive reset for at least 50 ms (7.1.7.5) */
510		msleep(50);
511
512		/* software reset of the controller, preserving HcFmInterval */
513		fminterval = readl(base + OHCI_FMINTERVAL);
514		writel(OHCI_HCR, base + OHCI_CMDSTATUS);
515
516		/* reset requires max 10 us delay */
517		for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
518			if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
519				break;
520			udelay(1);
521		}
522		writel(fminterval, base + OHCI_FMINTERVAL);
523
524		/* Now we're in the SUSPEND state with all devices reset
525		 * and wakeups and interrupts disabled
526		 */
527	}
528
529	/*
530	 * disable interrupts
531	 */
532	writel(~(u32)0, base + OHCI_INTRDISABLE);
533	writel(~(u32)0, base + OHCI_INTRSTATUS);
534
535	iounmap(base);
536}
537
538static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
539	{
540		/*  Pegatron Lucid (ExoPC) */
541		.matches = {
542			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
543			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
544		},
545	},
546	{
547		/*  Pegatron Lucid (Ordissimo AIRIS) */
548		.matches = {
549			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
550			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"),
551		},
552	},
553	{ }
554};
555
556static void __devinit ehci_bios_handoff(struct pci_dev *pdev,
557					void __iomem *op_reg_base,
558					u32 cap, u8 offset)
559{
560	int try_handoff = 1, tried_handoff = 0;
561
562	/* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
563	 * the handoff on its unused controller.  Skip it. */
564	if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
565		if (dmi_check_system(ehci_dmi_nohandoff_table))
566			try_handoff = 0;
567	}
568
569	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
570		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
571
572#if 0
573/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
574 * but that seems dubious in general (the BIOS left it off intentionally)
575 * and is known to prevent some systems from booting.  so we won't do this
576 * unless maybe we can determine when we're on a system that needs SMI forced.
577 */
578		/* BIOS workaround (?): be sure the pre-Linux code
579		 * receives the SMI
580		 */
581		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
582		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
583				       val | EHCI_USBLEGCTLSTS_SOOE);
584#endif
585
586		/* some systems get upset if this semaphore is
587		 * set for any other reason than forcing a BIOS
588		 * handoff..
589		 */
590		pci_write_config_byte(pdev, offset + 3, 1);
591	}
592
593	/* if boot firmware now owns EHCI, spin till it hands it over. */
594	if (try_handoff) {
595		int msec = 1000;
596		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
597			tried_handoff = 1;
598			msleep(10);
599			msec -= 10;
600			pci_read_config_dword(pdev, offset, &cap);
601		}
602	}
603
604	if (cap & EHCI_USBLEGSUP_BIOS) {
605		/* well, possibly buggy BIOS... try to shut it down,
606		 * and hope nothing goes too wrong
607		 */
608		if (try_handoff)
609			dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
610				 " (BIOS bug?) %08x\n", cap);
611		pci_write_config_byte(pdev, offset + 2, 0);
612	}
613
614	/* just in case, always disable EHCI SMIs */
615	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
616
617	/* If the BIOS ever owned the controller then we can't expect
618	 * any power sessions to remain intact.
619	 */
620	if (tried_handoff)
621		writel(0, op_reg_base + EHCI_CONFIGFLAG);
622}
623
624static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
625{
626	void __iomem *base, *op_reg_base;
627	u32	hcc_params, cap, val;
628	u8	offset, cap_length;
629	int	wait_time, delta, count = 256/4;
630
631	if (!mmio_resource_enabled(pdev, 0))
632		return;
633
634	base = pci_ioremap_bar(pdev, 0);
635	if (base == NULL)
636		return;
637
638	cap_length = readb(base);
639	op_reg_base = base + cap_length;
640
641	/* EHCI 0.96 and later may have "extended capabilities"
642	 * spec section 5.1 explains the bios handoff, e.g. for
643	 * booting from USB disk or using a usb keyboard
644	 */
645	hcc_params = readl(base + EHCI_HCC_PARAMS);
646	offset = (hcc_params >> 8) & 0xff;
647	while (offset && --count) {
648		pci_read_config_dword(pdev, offset, &cap);
649
650		switch (cap & 0xff) {
651		case 1:
652			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
653			break;
654		case 0: /* Illegal reserved cap, set cap=0 so we exit */
655			cap = 0; /* then fallthrough... */
656		default:
657			dev_warn(&pdev->dev, "EHCI: unrecognized capability "
658				 "%02x\n", cap & 0xff);
659		}
660		offset = (cap >> 8) & 0xff;
661	}
662	if (!count)
663		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
664
665	/*
666	 * halt EHCI & disable its interrupts in any case
667	 */
668	val = readl(op_reg_base + EHCI_USBSTS);
669	if ((val & EHCI_USBSTS_HALTED) == 0) {
670		val = readl(op_reg_base + EHCI_USBCMD);
671		val &= ~EHCI_USBCMD_RUN;
672		writel(val, op_reg_base + EHCI_USBCMD);
673
674		wait_time = 2000;
675		delta = 100;
676		do {
677			writel(0x3f, op_reg_base + EHCI_USBSTS);
678			udelay(delta);
679			wait_time -= delta;
680			val = readl(op_reg_base + EHCI_USBSTS);
681			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
682				break;
683			}
684		} while (wait_time > 0);
685	}
686	writel(0, op_reg_base + EHCI_USBINTR);
687	writel(0x3f, op_reg_base + EHCI_USBSTS);
688
689	iounmap(base);
690}
691
692/*
693 * handshake - spin reading a register until handshake completes
694 * @ptr: address of hc register to be read
695 * @mask: bits to look at in result of read
696 * @done: value of those bits when handshake succeeds
697 * @wait_usec: timeout in microseconds
698 * @delay_usec: delay in microseconds to wait between polling
699 *
700 * Polls a register every delay_usec microseconds.
701 * Returns 0 when the mask bits have the value done.
702 * Returns -ETIMEDOUT if this condition is not true after
703 * wait_usec microseconds have passed.
704 */
705static int handshake(void __iomem *ptr, u32 mask, u32 done,
706		int wait_usec, int delay_usec)
707{
708	u32	result;
709
710	do {
711		result = readl(ptr);
712		result &= mask;
713		if (result == done)
714			return 0;
715		udelay(delay_usec);
716		wait_usec -= delay_usec;
717	} while (wait_usec > 0);
718	return -ETIMEDOUT;
719}
720
721bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
722{
723	return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
724		pdev->vendor == PCI_VENDOR_ID_INTEL &&
725		pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
726}
727EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
728
729/*
730 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
731 * share some number of ports.  These ports can be switched between either
732 * controller.  Not all of the ports under the EHCI host controller may be
733 * switchable.
734 *
735 * The ports should be switched over to xHCI before PCI probes for any device
736 * start.  This avoids active devices under EHCI being disconnected during the
737 * port switchover, which could cause loss of data on USB storage devices, or
738 * failed boot when the root file system is on a USB mass storage device and is
739 * enumerated under EHCI first.
740 *
741 * We write into the xHC's PCI configuration space in some Intel-specific
742 * registers to switch the ports over.  The USB 3.0 terminations and the USB
743 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
744 * terminations before switching the USB 2.0 wires over, so that USB 3.0
745 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
746 */
747void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
748{
749	u32		ports_available;
750
751	ports_available = 0xffffffff;
752	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
753	 * Register, to turn on SuperSpeed terminations for all
754	 * available ports.
755	 */
756	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
757			cpu_to_le32(ports_available));
758
759	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
760			&ports_available);
761	dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
762			"under xHCI: 0x%x\n", ports_available);
763
764	ports_available = 0xffffffff;
765	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
766	 * switch the USB 2.0 power and data lines over to the xHCI
767	 * host.
768	 */
769	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
770			cpu_to_le32(ports_available));
771
772	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
773			&ports_available);
774	dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
775			"to xHCI: 0x%x\n", ports_available);
776}
777EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
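
A minimal sketch of the intended caller (the helper name is hypothetical and pci_get_class() is used here purely for illustration; this should run before EHCI starts probing ports): locate the Panther Point xHC and hand the shared ports over to it.

static void example_switch_shared_ports_to_xhci(void)
{
	struct pci_dev *xhci_pdev;

	xhci_pdev = pci_get_class(PCI_CLASS_SERIAL_USB_XHCI, NULL);
	if (xhci_pdev && usb_is_intel_switchable_xhci(xhci_pdev))
		usb_enable_xhci_ports(xhci_pdev);
	pci_dev_put(xhci_pdev);	/* pci_get_class() took a reference */
}
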
778
779/**
780 * PCI Quirks for xHCI.
781 *
782 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
783 * It signals to the BIOS that the OS wants control of the host controller,
784 * and then waits 5 seconds for the BIOS to hand over control.
785 * If we timeout, assume the BIOS is broken and take control anyway.
786 */
787static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
788{
789	void __iomem *base;
790	int ext_cap_offset;
791	void __iomem *op_reg_base;
792	u32 val;
793	int timeout;
794
795	if (!mmio_resource_enabled(pdev, 0))
796		return;
797
798	base = ioremap_nocache(pci_resource_start(pdev, 0),
799				pci_resource_len(pdev, 0));
800	if (base == NULL)
801		return;
802
803	/*
804	 * Find the Legacy Support Capability register -
805	 * this is optional for xHCI host controllers.
806	 */
807	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
808	do {
809		if (!ext_cap_offset)
810			/* We've reached the end of the extended capabilities */
811			goto hc_init;
812		val = readl(base + ext_cap_offset);
813		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
814			break;
815		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
816	} while (1);
817
818	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
819	if (val & XHCI_HC_BIOS_OWNED) {
820		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
821
822		/* Wait for 5 seconds with 10 microsecond polling interval */
823		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
824				0, 5000, 10);
825
826		/* Assume a buggy BIOS and take HC ownership anyway */
827		if (timeout) {
828			dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
829					" (BIOS bug ?) %08x\n", val);
830			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
831		}
832	}
833
834	/* Disable any BIOS SMIs */
835	writel(XHCI_LEGACY_DISABLE_SMI,
836			base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
837
838	if (usb_is_intel_switchable_xhci(pdev))
839		usb_enable_xhci_ports(pdev);
840hc_init:
841	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
842
843	/* Wait for the host controller to be ready before writing any
844	 * operational or runtime registers.  Wait 5 seconds and no more.
845	 */
846	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
847			5000, 10);
848	/* Assume a buggy HC and start HC initialization anyway */
849	if (timeout) {
850		val = readl(op_reg_base + XHCI_STS_OFFSET);
851		dev_warn(&pdev->dev,
852				"xHCI HW not ready after 5 sec (HC bug?) "
853				"status = 0x%x\n", val);
854	}
855
856	/* Send the halt and disable interrupts command */
857	val = readl(op_reg_base + XHCI_CMD_OFFSET);
858	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
859	writel(val, op_reg_base + XHCI_CMD_OFFSET);
860
861	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
862	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
863			XHCI_MAX_HALT_USEC, 125);
864	if (timeout) {
865		val = readl(op_reg_base + XHCI_STS_OFFSET);
866		dev_warn(&pdev->dev,
867				"xHCI HW did not halt within %d usec "
868				"status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
869	}
870
871	iounmap(base);
872}
873
874static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
875{
876	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
877		quirk_usb_handoff_uhci(pdev);
878	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
879		quirk_usb_handoff_ohci(pdev);
880	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
881		quirk_usb_disable_ehci(pdev);
882	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
883		quirk_usb_handoff_xhci(pdev);
884}
885DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * This file contains code to reset and initialize USB host controllers.
   4 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
   5 * It may need to run early during booting -- before USB would normally
   6 * initialize -- to ensure that Linux doesn't use any legacy modes.
   7 *
   8 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
   9 *  (and others)
  10 */
  11
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/pci.h>
  15#include <linux/delay.h>
  16#include <linux/export.h>
  17#include <linux/acpi.h>
  18#include <linux/dmi.h>
  19#include "pci-quirks.h"
  20#include "xhci-ext-caps.h"
  21
  22
  23#define UHCI_USBLEGSUP		0xc0		/* legacy support */
  24#define UHCI_USBCMD		0		/* command register */
  25#define UHCI_USBINTR		4		/* interrupt register */
  26#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
  27#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
  28#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
  29#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
  30#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
  31#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
  32#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */
  33
  34#define OHCI_CONTROL		0x04
  35#define OHCI_CMDSTATUS		0x08
  36#define OHCI_INTRSTATUS		0x0c
  37#define OHCI_INTRENABLE		0x10
  38#define OHCI_INTRDISABLE	0x14
  39#define OHCI_FMINTERVAL		0x34
  40#define OHCI_HCFS		(3 << 6)	/* hc functional state */
  41#define OHCI_HCR		(1 << 0)	/* host controller reset */
  42#define OHCI_OCR		(1 << 3)	/* ownership change request */
  43#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
  44#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
  45#define OHCI_INTR_OC		(1 << 30)	/* ownership change */
  46
  47#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
  48#define EHCI_USBCMD		0		/* command register */
  49#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
  50#define EHCI_USBSTS		4		/* status register */
  51#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
  52#define EHCI_USBINTR		8		/* interrupt register */
  53#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
  54#define EHCI_USBLEGSUP		0		/* legacy support register */
  55#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
  56#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
  57#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
  58#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */
  59
  60/* AMD quirk use */
  61#define	AB_REG_BAR_LOW		0xe0
  62#define	AB_REG_BAR_HIGH		0xe1
  63#define	AB_REG_BAR_SB700	0xf0
  64#define	AB_INDX(addr)		((addr) + 0x00)
  65#define	AB_DATA(addr)		((addr) + 0x04)
  66#define	AX_INDXC		0x30
  67#define	AX_DATAC		0x34
  68
  69#define PT_ADDR_INDX		0xE8
  70#define PT_READ_INDX		0xE4
  71#define PT_SIG_1_ADDR		0xA520
  72#define PT_SIG_2_ADDR		0xA521
  73#define PT_SIG_3_ADDR		0xA522
  74#define PT_SIG_4_ADDR		0xA523
  75#define PT_SIG_1_DATA		0x78
  76#define PT_SIG_2_DATA		0x56
  77#define PT_SIG_3_DATA		0x34
  78#define PT_SIG_4_DATA		0x12
  79#define PT4_P1_REG		0xB521
  80#define PT4_P2_REG		0xB522
  81#define PT2_P1_REG		0xD520
  82#define PT2_P2_REG		0xD521
  83#define PT1_P1_REG		0xD522
  84#define PT1_P2_REG		0xD523
  85
  86#define	NB_PCIE_INDX_ADDR	0xe0
  87#define	NB_PCIE_INDX_DATA	0xe4
  88#define	PCIE_P_CNTL		0x10040
  89#define	BIF_NB			0x10002
  90#define	NB_PIF0_PWRDOWN_0	0x01100012
  91#define	NB_PIF0_PWRDOWN_1	0x01100013
  92
  93#define USB_INTEL_XUSB2PR      0xD0
  94#define USB_INTEL_USB2PRM      0xD4
  95#define USB_INTEL_USB3_PSSEN   0xD8
  96#define USB_INTEL_USB3PRM      0xDC
  97
  98/* ASMEDIA quirk use */
  99#define ASMT_DATA_WRITE0_REG	0xF8
 100#define ASMT_DATA_WRITE1_REG	0xFC
 101#define ASMT_CONTROL_REG	0xE0
 102#define ASMT_CONTROL_WRITE_BIT	0x02
 103#define ASMT_WRITEREG_CMD	0x10423
 104#define ASMT_FLOWCTL_ADDR	0xFA30
 105#define ASMT_FLOWCTL_DATA	0xBA
 106#define ASMT_PSEUDO_DATA	0
 107
 108/*
 109 * amd_chipset_gen values represent AMD different chipset generations
 110 */
 111enum amd_chipset_gen {
 112	NOT_AMD_CHIPSET = 0,
 113	AMD_CHIPSET_SB600,
 114	AMD_CHIPSET_SB700,
 115	AMD_CHIPSET_SB800,
 116	AMD_CHIPSET_HUDSON2,
 117	AMD_CHIPSET_BOLTON,
 118	AMD_CHIPSET_YANGTZE,
 119	AMD_CHIPSET_TAISHAN,
 120	AMD_CHIPSET_UNKNOWN,
 121};
 122
 123struct amd_chipset_type {
 124	enum amd_chipset_gen gen;
 125	u8 rev;
 126};
 127
 128static struct amd_chipset_info {
 129	struct pci_dev	*nb_dev;
 130	struct pci_dev	*smbus_dev;
 131	int nb_type;
 132	struct amd_chipset_type sb_type;
 133	int isoc_reqs;
 134	int probe_count;
 135	int probe_result;
 136} amd_chipset;
 137
 138static DEFINE_SPINLOCK(amd_lock);
 139
 140/*
 141 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
 142 *
 143 * AMD FCH/SB generation and revision is identified by SMBus controller
 144 * vendor, device and revision IDs.
 145 *
 146 * Returns: 1 if it is an AMD chipset, 0 otherwise.
 147 */
 148static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
 149{
 150	u8 rev = 0;
 151	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
 152
 153	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
 154			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
 155	if (pinfo->smbus_dev) {
 156		rev = pinfo->smbus_dev->revision;
 157		if (rev >= 0x10 && rev <= 0x1f)
 158			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
 159		else if (rev >= 0x30 && rev <= 0x3f)
 160			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
 161		else if (rev >= 0x40 && rev <= 0x4f)
 162			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
 163	} else {
 164		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 165				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
 166
 167		if (pinfo->smbus_dev) {
 168			rev = pinfo->smbus_dev->revision;
 169			if (rev >= 0x11 && rev <= 0x14)
 170				pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
 171			else if (rev >= 0x15 && rev <= 0x18)
 172				pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
 173			else if (rev >= 0x39 && rev <= 0x3a)
 174				pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
 175		} else {
 176			pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 177							  0x145c, NULL);
 178			if (pinfo->smbus_dev) {
 179				rev = pinfo->smbus_dev->revision;
 180				pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
 181			} else {
 182				pinfo->sb_type.gen = NOT_AMD_CHIPSET;
 183				return 0;
 184			}
 185		}
 186	}
 187	pinfo->sb_type.rev = rev;
 188	return 1;
 189}
 190
 191void sb800_prefetch(struct device *dev, int on)
 192{
 193	u16 misc;
 194	struct pci_dev *pdev = to_pci_dev(dev);
 195
 196	pci_read_config_word(pdev, 0x50, &misc);
 197	if (on == 0)
 198		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
 199	else
 200		pci_write_config_word(pdev, 0x50, misc | 0x0300);
 201}
 202EXPORT_SYMBOL_GPL(sb800_prefetch);
 203
 204int usb_amd_find_chipset_info(void)
 205{
 206	unsigned long flags;
 207	struct amd_chipset_info info;
 208	int ret;
 209
 210	spin_lock_irqsave(&amd_lock, flags);
 211
 212	/* probe only once */
 213	if (amd_chipset.probe_count > 0) {
 214		amd_chipset.probe_count++;
 215		spin_unlock_irqrestore(&amd_lock, flags);
 216		return amd_chipset.probe_result;
 217	}
 218	memset(&info, 0, sizeof(info));
 219	spin_unlock_irqrestore(&amd_lock, flags);
 220
 221	if (!amd_chipset_sb_type_init(&info)) {
 222		ret = 0;
 223		goto commit;
 224	}
 225
 226	/* Below chipset generations needn't enable AMD PLL quirk */
 227	if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
 228			info.sb_type.gen == AMD_CHIPSET_SB600 ||
 229			info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
 230			(info.sb_type.gen == AMD_CHIPSET_SB700 &&
 231			info.sb_type.rev > 0x3b)) {
 232		if (info.smbus_dev) {
 233			pci_dev_put(info.smbus_dev);
 234			info.smbus_dev = NULL;
 235		}
 236		ret = 0;
 237		goto commit;
 238	}
 239
 240	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
 241	if (info.nb_dev) {
 242		info.nb_type = 1;
 243	} else {
 244		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
 245		if (info.nb_dev) {
 246			info.nb_type = 2;
 247		} else {
 248			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 249						     0x9600, NULL);
 250			if (info.nb_dev)
 251				info.nb_type = 3;
 252		}
 253	}
 254
 255	ret = info.probe_result = 1;
 256	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 257
 258commit:
 259
 260	spin_lock_irqsave(&amd_lock, flags);
 261	if (amd_chipset.probe_count > 0) {
 262		/* race - someone else was faster - drop devices */
 263
 264		/* Mark that we were here */
 265		amd_chipset.probe_count++;
 266		ret = amd_chipset.probe_result;
 267
 268		spin_unlock_irqrestore(&amd_lock, flags);
 269
 270		pci_dev_put(info.nb_dev);
 271		pci_dev_put(info.smbus_dev);
 272
 273	} else {
 274		/* no race - commit the result */
 275		info.probe_count++;
 276		amd_chipset = info;
 277		spin_unlock_irqrestore(&amd_lock, flags);
 278	}
 279
 280	return ret;
 281}
 282EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
 283
 284int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
 285{
 286	/* Make sure amd chipset type has already been initialized */
 287	usb_amd_find_chipset_info();
 288	if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
 289	    amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
 290		dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
 291		return 1;
 292	}
 293	return 0;
 294}
 295EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
 296
 297bool usb_amd_hang_symptom_quirk(void)
 298{
 299	u8 rev;
 300
 301	usb_amd_find_chipset_info();
 302	rev = amd_chipset.sb_type.rev;
 303	/* SB600 and old version of SB700 have hang symptom bug */
 304	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
 305			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 306			 rev >= 0x3a && rev <= 0x3b);
 307}
 308EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
 309
 310bool usb_amd_prefetch_quirk(void)
 311{
 312	usb_amd_find_chipset_info();
 313	/* SB800 needs pre-fetch fix */
 314	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
 315}
 316EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
 317
 318/*
 319 * The hardware normally enables the A-link power management feature, which
 320 * lets the system lower the power consumption in idle states.
 321 *
 322 * This USB quirk prevents the link going into that lower power state
 323 * during isochronous transfers.
 324 *
 325 * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of
 326 * some AMD platforms may stutter or have breaks occasionally.
 327 */
 328static void usb_amd_quirk_pll(int disable)
 329{
 330	u32 addr, addr_low, addr_high, val;
 331	u32 bit = disable ? 0 : 1;
 332	unsigned long flags;
 333
 334	spin_lock_irqsave(&amd_lock, flags);
 335
 336	if (disable) {
 337		amd_chipset.isoc_reqs++;
 338		if (amd_chipset.isoc_reqs > 1) {
 339			spin_unlock_irqrestore(&amd_lock, flags);
 340			return;
 341		}
 342	} else {
 343		amd_chipset.isoc_reqs--;
 344		if (amd_chipset.isoc_reqs > 0) {
 345			spin_unlock_irqrestore(&amd_lock, flags);
 346			return;
 347		}
 348	}
 349
 350	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
 351			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
 352			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
 353		outb_p(AB_REG_BAR_LOW, 0xcd6);
 354		addr_low = inb_p(0xcd7);
 355		outb_p(AB_REG_BAR_HIGH, 0xcd6);
 356		addr_high = inb_p(0xcd7);
 357		addr = addr_high << 8 | addr_low;
 358
 359		outl_p(0x30, AB_INDX(addr));
 360		outl_p(0x40, AB_DATA(addr));
 361		outl_p(0x34, AB_INDX(addr));
 362		val = inl_p(AB_DATA(addr));
 363	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 364			amd_chipset.sb_type.rev <= 0x3b) {
 365		pci_read_config_dword(amd_chipset.smbus_dev,
 366					AB_REG_BAR_SB700, &addr);
 367		outl(AX_INDXC, AB_INDX(addr));
 368		outl(0x40, AB_DATA(addr));
 369		outl(AX_DATAC, AB_INDX(addr));
 370		val = inl(AB_DATA(addr));
 371	} else {
 372		spin_unlock_irqrestore(&amd_lock, flags);
 373		return;
 374	}
 375
 376	if (disable) {
 377		val &= ~0x08;
 378		val |= (1 << 4) | (1 << 9);
 379	} else {
 380		val |= 0x08;
 381		val &= ~((1 << 4) | (1 << 9));
 382	}
 383	outl_p(val, AB_DATA(addr));
 384
 385	if (!amd_chipset.nb_dev) {
 386		spin_unlock_irqrestore(&amd_lock, flags);
 387		return;
 388	}
 389
 390	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
 391		addr = PCIE_P_CNTL;
 392		pci_write_config_dword(amd_chipset.nb_dev,
 393					NB_PCIE_INDX_ADDR, addr);
 394		pci_read_config_dword(amd_chipset.nb_dev,
 395					NB_PCIE_INDX_DATA, &val);
 396
 397		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
 398		val |= bit | (bit << 3) | (bit << 12);
 399		val |= ((!bit) << 4) | ((!bit) << 9);
 400		pci_write_config_dword(amd_chipset.nb_dev,
 401					NB_PCIE_INDX_DATA, val);
 402
 403		addr = BIF_NB;
 404		pci_write_config_dword(amd_chipset.nb_dev,
 405					NB_PCIE_INDX_ADDR, addr);
 406		pci_read_config_dword(amd_chipset.nb_dev,
 407					NB_PCIE_INDX_DATA, &val);
 408		val &= ~(1 << 8);
 409		val |= bit << 8;
 410
 411		pci_write_config_dword(amd_chipset.nb_dev,
 412					NB_PCIE_INDX_DATA, val);
 413	} else if (amd_chipset.nb_type == 2) {
 414		addr = NB_PIF0_PWRDOWN_0;
 415		pci_write_config_dword(amd_chipset.nb_dev,
 416					NB_PCIE_INDX_ADDR, addr);
 417		pci_read_config_dword(amd_chipset.nb_dev,
 418					NB_PCIE_INDX_DATA, &val);
 419		if (disable)
 420			val &= ~(0x3f << 7);
 421		else
 422			val |= 0x3f << 7;
 423
 424		pci_write_config_dword(amd_chipset.nb_dev,
 425					NB_PCIE_INDX_DATA, val);
 426
 427		addr = NB_PIF0_PWRDOWN_1;
 428		pci_write_config_dword(amd_chipset.nb_dev,
 429					NB_PCIE_INDX_ADDR, addr);
 430		pci_read_config_dword(amd_chipset.nb_dev,
 431					NB_PCIE_INDX_DATA, &val);
 432		if (disable)
 433			val &= ~(0x3f << 7);
 434		else
 435			val |= 0x3f << 7;
 436
 437		pci_write_config_dword(amd_chipset.nb_dev,
 438					NB_PCIE_INDX_DATA, val);
 439	}
 440
 441	spin_unlock_irqrestore(&amd_lock, flags);
 442	return;
 443}
 444
 445void usb_amd_quirk_pll_disable(void)
 446{
 447	usb_amd_quirk_pll(1);
 448}
 449EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
 450
 451static int usb_asmedia_wait_write(struct pci_dev *pdev)
 452{
 453	unsigned long retry_count;
 454	unsigned char value;
 455
 456	for (retry_count = 1000; retry_count > 0; --retry_count) {
 457
 458		pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
 459
 460		if (value == 0xff) {
 461			dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
 462			return -EIO;
 463		}
 464
 465		if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
 466			return 0;
 467
 468		udelay(50);
 469	}
 470
 471	dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
 472	return -ETIMEDOUT;
 473}
 474
 475void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
 476{
 477	if (usb_asmedia_wait_write(pdev) != 0)
 478		return;
 479
 480	/* send command and address to device */
 481	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
 482	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
 483	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
 484
 485	if (usb_asmedia_wait_write(pdev) != 0)
 486		return;
 487
 488	/* send data to device */
 489	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
 490	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
 491	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
 492}
 493EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
 494
 495void usb_amd_quirk_pll_enable(void)
 496{
 497	usb_amd_quirk_pll(0);
 498}
 499EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
 500
 501void usb_amd_dev_put(void)
 502{
 503	struct pci_dev *nb, *smbus;
 504	unsigned long flags;
 505
 506	spin_lock_irqsave(&amd_lock, flags);
 507
 508	amd_chipset.probe_count--;
 509	if (amd_chipset.probe_count > 0) {
 510		spin_unlock_irqrestore(&amd_lock, flags);
 511		return;
 512	}
 513
 514	/* save them to pci_dev_put outside of spinlock */
 515	nb    = amd_chipset.nb_dev;
 516	smbus = amd_chipset.smbus_dev;
 517
 518	amd_chipset.nb_dev = NULL;
 519	amd_chipset.smbus_dev = NULL;
 520	amd_chipset.nb_type = 0;
 521	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
 522	amd_chipset.isoc_reqs = 0;
 523	amd_chipset.probe_result = 0;
 524
 525	spin_unlock_irqrestore(&amd_lock, flags);
 526
 527	pci_dev_put(nb);
 528	pci_dev_put(smbus);
 529}
 530EXPORT_SYMBOL_GPL(usb_amd_dev_put);
 531
 532/*
 533 * Check if port is disabled in BIOS on AMD Promontory host.
 534 * BIOS Disabled ports may wake on connect/disconnect and need
 535 * driver workaround to keep them disabled.
 536 * Returns true if port is marked disabled.
 537 */
 538bool usb_amd_pt_check_port(struct device *device, int port)
 539{
 540	unsigned char value, port_shift;
 541	struct pci_dev *pdev;
 542	u16 reg;
 543
 544	pdev = to_pci_dev(device);
 545	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);
 546
 547	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 548	if (value != PT_SIG_1_DATA)
 549		return false;
 550
 551	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);
 552
 553	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 554	if (value != PT_SIG_2_DATA)
 555		return false;
 556
 557	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);
 558
 559	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 560	if (value != PT_SIG_3_DATA)
 561		return false;
 562
 563	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);
 564
 565	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 566	if (value != PT_SIG_4_DATA)
 567		return false;
 568
 569	/* Check disabled port setting, if bit is set port is enabled */
 570	switch (pdev->device) {
 571	case 0x43b9:
 572	case 0x43ba:
 573	/*
 574	 * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
 575	 * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0
 576	 * PT4_P2_REG bits[6..0] represents ports 13 to 7
 577	 */
 578		if (port > 6) {
 579			reg = PT4_P2_REG;
 580			port_shift = port - 7;
 581		} else {
 582			reg = PT4_P1_REG;
 583			port_shift = port + 1;
 584		}
 585		break;
 586	case 0x43bb:
 587	/*
 588	 * device is AMD_PROMONTORYA_2(0x43bb)
 589	 * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0
 590	 * PT2_P2_REG bits[5..0] represents ports 9 to 3
 591	 */
 592		if (port > 2) {
 593			reg = PT2_P2_REG;
 594			port_shift = port - 3;
 595		} else {
 596			reg = PT2_P1_REG;
 597			port_shift = port + 5;
 598		}
 599		break;
 600	case 0x43bc:
 601	/*
 602	 * device is AMD_PROMONTORYA_1(0x43bc)
 603	 * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0
 604	 * PT1_P2_REG[5..0] represents ports 9 to 4
 605	 */
 606		if (port > 3) {
 607			reg = PT1_P2_REG;
 608			port_shift = port - 4;
 609		} else {
 610			reg = PT1_P1_REG;
 611			port_shift = port + 4;
 612		}
 613		break;
 614	default:
 615		return false;
 616	}
 617	pci_write_config_word(pdev, PT_ADDR_INDX, reg);
 618	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 619
 620	return !(value & BIT(port_shift));
 621}
 622EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
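
A brief caller sketch (hypothetical helper; the port index is 0-based, matching the register-map comments above): an xHCI hub-control path can consult this before powering a port so that BIOS-disabled ports stay disabled.

static bool example_port_bios_disabled(struct usb_hcd *hcd, int port)
{
	/* true when the Promontory BIOS marked this port disabled */
	return usb_amd_pt_check_port(hcd->self.controller, port);
}
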
 623
 624/*
 625 * Make sure the controller is completely inactive, unable to
 626 * generate interrupts or do DMA.
 627 */
 628void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
 629{
 630	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
 631	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
 632	 */
 633	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
 634
 635	/* Reset the HC - this will force us to get a
 636	 * new notification of any already connected
 637	 * ports due to the virtual disconnect that it
 638	 * implies.
 639	 */
 640	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
 641	mb();
 642	udelay(5);
 643	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
 644		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
 645
 646	/* Just to be safe, disable interrupt requests and
 647	 * make sure the controller is stopped.
 648	 */
 649	outw(0, base + UHCI_USBINTR);
 650	outw(0, base + UHCI_USBCMD);
 651}
 652EXPORT_SYMBOL_GPL(uhci_reset_hc);
 653
 654/*
 655 * Initialize a controller that was newly discovered or has just been
 656 * resumed.  In either case we can't be sure of its previous state.
 657 *
 658 * Returns: 1 if the controller was reset, 0 otherwise.
 659 */
 660int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
 661{
 662	u16 legsup;
 663	unsigned int cmd, intr;
 664
 665	/*
 666	 * When restarting a suspended controller, we expect all the
 667	 * settings to be the same as we left them:
 668	 *
 669	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
 670	 *	Controller is stopped and configured with EGSM set;
 671	 *	No interrupts enabled except possibly Resume Detect.
 672	 *
 673	 * If any of these conditions are violated we do a complete reset.
 674	 */
 675	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
 676	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
 677		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
 678				__func__, legsup);
 679		goto reset_needed;
 680	}
 681
 682	cmd = inw(base + UHCI_USBCMD);
 683	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
 684			!(cmd & UHCI_USBCMD_EGSM)) {
 685		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
 686				__func__, cmd);
 687		goto reset_needed;
 688	}
 689
 690	intr = inw(base + UHCI_USBINTR);
 691	if (intr & (~UHCI_USBINTR_RESUME)) {
 692		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
 693				__func__, intr);
 694		goto reset_needed;
 695	}
 696	return 0;
 697
 698reset_needed:
 699	dev_dbg(&pdev->dev, "Performing full reset\n");
 700	uhci_reset_hc(pdev, base);
 701	return 1;
 702}
 703EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
 704
 705static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
 706{
 707	u16 cmd;
 708	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
 709}
 710
 711#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
 712#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
 713
 714static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
 715{
 716	unsigned long base = 0;
 717	int i;
 718
 719	if (!pio_enabled(pdev))
 720		return;
 721
 722	for (i = 0; i < PCI_ROM_RESOURCE; i++)
 723		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
 724			base = pci_resource_start(pdev, i);
 725			break;
 726		}
 727
 728	if (base)
 729		uhci_check_and_reset_hc(pdev, base);
 730}
 731
 732static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
 733{
 734	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
 735}
 736
 737static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
 738{
 739	void __iomem *base;
 740	u32 control;
 741	u32 fminterval = 0;
 742	bool no_fminterval = false;
 743	int cnt;
 744
 745	if (!mmio_resource_enabled(pdev, 0))
 746		return;
 747
 748	base = pci_ioremap_bar(pdev, 0);
 749	if (base == NULL)
 750		return;
 751
 752	/*
 753	 * ULi M5237 OHCI controller locks the whole system when accessing
 754	 * the OHCI_FMINTERVAL offset.
 755	 */
 756	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
 757		no_fminterval = true;
 758
 759	control = readl(base + OHCI_CONTROL);
 760
 761/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
 762#ifdef __hppa__
 763#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
 764#else
 765#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC
 766
 767	if (control & OHCI_CTRL_IR) {
 768		int wait_time = 500; /* arbitrary; 5 seconds */
 769		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
 770		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
 771		while (wait_time > 0 &&
 772				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
 773			wait_time -= 10;
 774			msleep(10);
 775		}
 776		if (wait_time <= 0)
 777			dev_warn(&pdev->dev,
 778				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 779				 readl(base + OHCI_CONTROL));
 780	}
 781#endif
 782
 783	/* disable interrupts */
 784	writel((u32) ~0, base + OHCI_INTRDISABLE);
 785
 786	/* Reset the USB bus, if the controller isn't already in RESET */
 787	if (control & OHCI_HCFS) {
 788		/* Go into RESET, preserving RWC (and possibly IR) */
 789		writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
 790		readl(base + OHCI_CONTROL);
 791
 792		/* drive bus reset for at least 50 ms (7.1.7.5) */
 793		msleep(50);
 794	}
 795
 796	/* software reset of the controller, preserving HcFmInterval */
 797	if (!no_fminterval)
 798		fminterval = readl(base + OHCI_FMINTERVAL);
 799
 800	writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 801
 802	/* reset requires max 10 us delay */
 803	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
 804		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
 805			break;
 806		udelay(1);
 807	}
 808
 809	if (!no_fminterval)
 810		writel(fminterval, base + OHCI_FMINTERVAL);
 811
 812	/* Now the controller is safely in SUSPEND and nothing can wake it up */
 813	iounmap(base);
 814}
 815
 816static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
 817	{
 818		/*  Pegatron Lucid (ExoPC) */
 819		.matches = {
 820			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
 821			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
 822		},
 823	},
 824	{
 825		/*  Pegatron Lucid (Ordissimo AIRIS) */
 826		.matches = {
 827			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
 828			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 829		},
 830	},
 831	{
 832		/*  Pegatron Lucid (Ordissimo) */
 833		.matches = {
 834			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
 835			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 836		},
 837	},
 838	{
 839		/* HASEE E200 */
 840		.matches = {
 841			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
 842			DMI_MATCH(DMI_BOARD_NAME, "E210"),
 843			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
 844		},
 845	},
 846	{ }
 847};
 848
 849static void ehci_bios_handoff(struct pci_dev *pdev,
 850					void __iomem *op_reg_base,
 851					u32 cap, u8 offset)
 852{
 853	int try_handoff = 1, tried_handoff = 0;
 854
 855	/*
 856	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
 857	 * the handoff on its unused controller.  Skip it.
 858	 *
 859	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
 860	 */
 861	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
 862			pdev->device == 0x27cc)) {
 863		if (dmi_check_system(ehci_dmi_nohandoff_table))
 864			try_handoff = 0;
 865	}
 866
 867	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
 868		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
 869
 870#if 0
 871/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 872 * but that seems dubious in general (the BIOS left it off intentionally)
 873 * and is known to prevent some systems from booting.  so we won't do this
 874 * unless maybe we can determine when we're on a system that needs SMI forced.
 875 */
 876		/* BIOS workaround (?): be sure the pre-Linux code
 877		 * receives the SMI
 878		 */
 879		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
 880		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
 881				       val | EHCI_USBLEGCTLSTS_SOOE);
 882#endif
 883
 884		/* some systems get upset if this semaphore is
 885		 * set for any other reason than forcing a BIOS
 886		 * handoff..
 887		 */
 888		pci_write_config_byte(pdev, offset + 3, 1);
 889	}
 890
 891	/* if boot firmware now owns EHCI, spin till it hands it over. */
 892	if (try_handoff) {
 893		int msec = 1000;
 894		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
 895			tried_handoff = 1;
 896			msleep(10);
 897			msec -= 10;
 898			pci_read_config_dword(pdev, offset, &cap);
 899		}
 900	}
 901
 902	if (cap & EHCI_USBLEGSUP_BIOS) {
 903		/* well, possibly buggy BIOS... try to shut it down,
 904		 * and hope nothing goes too wrong
 905		 */
 906		if (try_handoff)
 907			dev_warn(&pdev->dev,
 908				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 909				 cap);
 910		pci_write_config_byte(pdev, offset + 2, 0);
 911	}
 912
 913	/* just in case, always disable EHCI SMIs */
 914	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
 915
 916	/* If the BIOS ever owned the controller then we can't expect
 917	 * any power sessions to remain intact.
 918	 */
 919	if (tried_handoff)
 920		writel(0, op_reg_base + EHCI_CONFIGFLAG);
 921}
 922
 923static void quirk_usb_disable_ehci(struct pci_dev *pdev)
 924{
 925	void __iomem *base, *op_reg_base;
 926	u32	hcc_params, cap, val;
 927	u8	offset, cap_length;
 928	int	wait_time, count = 256/4;
 929
 930	if (!mmio_resource_enabled(pdev, 0))
 931		return;
 932
 933	base = pci_ioremap_bar(pdev, 0);
 934	if (base == NULL)
 935		return;
 936
 937	cap_length = readb(base);
 938	op_reg_base = base + cap_length;
 939
 940	/* EHCI 0.96 and later may have "extended capabilities";
 941	 * spec section 5.1 explains the BIOS handoff, e.g. for
 942	 * booting from a USB disk or using a USB keyboard.
 943	 */
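	/* HCCPARAMS[15:8] (EECP) gives the PCI config-space offset of the
	 * first extended capability; each capability dword carries its ID in
	 * bits 7:0 and the offset of the next capability in bits 15:8, which
	 * is what the loop below follows.  "count" bounds the walk to 256/4
	 * entries so a malformed, looping capability list can't hang us.
	 */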
 944	hcc_params = readl(base + EHCI_HCC_PARAMS);
 945	offset = (hcc_params >> 8) & 0xff;
 946	while (offset && --count) {
 947		pci_read_config_dword(pdev, offset, &cap);
 948
 949		switch (cap & 0xff) {
 950		case 1:
 951			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
 952			break;
 953		case 0: /* Illegal reserved cap, set cap=0 so we exit */
 954			cap = 0; /* fall through */
 955		default:
 956			dev_warn(&pdev->dev,
 957				 "EHCI: unrecognized capability %02x\n",
 958				 cap & 0xff);
 959		}
 960		offset = (cap >> 8) & 0xff;
 961	}
 962	if (!count)
 963		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
 964
 965	/*
 966	 * Halt EHCI and disable its interrupts in any case.
 967	 */
 968	val = readl(op_reg_base + EHCI_USBSTS);
 969	if ((val & EHCI_USBSTS_HALTED) == 0) {
 970		val = readl(op_reg_base + EHCI_USBCMD);
 971		val &= ~EHCI_USBCMD_RUN;
 972		writel(val, op_reg_base + EHCI_USBCMD);
 973
 974		wait_time = 2000;
 975		do {
 976			writel(0x3f, op_reg_base + EHCI_USBSTS);
 977			udelay(100);
 978			wait_time -= 100;
 979			val = readl(op_reg_base + EHCI_USBSTS);
 980			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
 981				break;
 982			}
 983		} while (wait_time > 0);
 984	}
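	/* Writing 0 to USBINTR disables all EHCI interrupt sources; USBSTS
	 * bits 5:0 are RW1C, so the 0x3f write acknowledges (clears) any
	 * status that is still pending.
	 */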
 985	writel(0, op_reg_base + EHCI_USBINTR);
 986	writel(0x3f, op_reg_base + EHCI_USBSTS);
 987
 988	iounmap(base);
 989}
 990
 991/*
 992 * handshake - spin reading a register until handshake completes
 993 * @ptr: address of hc register to be read
 994 * @mask: bits to look at in result of read
 995 * @done: value of those bits when handshake succeeds
 996 * @wait_usec: timeout in microseconds
 997 * @delay_usec: delay in microseconds to wait between polling
 998 *
 999 * Polls a register every delay_usec microseconds.
1000 * Returns 0 when the mask bits have the value done.
1001 * Returns -ETIMEDOUT if this condition is not true after
1002 * wait_usec microseconds have passed.
1003 */
1004static int handshake(void __iomem *ptr, u32 mask, u32 done,
1005		int wait_usec, int delay_usec)
1006{
1007	u32	result;
1008
1009	do {
1010		result = readl(ptr);
1011		result &= mask;
1012		if (result == done)
1013			return 0;
1014		udelay(delay_usec);
1015		wait_usec -= delay_usec;
1016	} while (wait_usec > 0);
1017	return -ETIMEDOUT;
1018}
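
/*
 * Illustrative use of handshake() (a sketch only, not part of the quirk
 * flow): SOME_STS_OFFSET and SOME_STS_BUSY are hypothetical names standing
 * in for whatever status register and busy bit a caller needs to poll.
 *
 *	if (handshake(op_reg_base + SOME_STS_OFFSET, SOME_STS_BUSY, 0,
 *		      100 * 1000, 10))
 *		dev_warn(&pdev->dev, "controller still busy after 100 ms\n");
 */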
1019
1020/*
1021 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
1022 * share some number of ports.  These ports can be switched between either
1023 * controller.  Not all of the ports under the EHCI host controller may be
1024 * switchable.
1025 *
1026 * The ports should be switched over to xHCI before PCI probes for any device
1027 * start.  This avoids active devices under EHCI being disconnected during the
1028 * port switchover, which could cause loss of data on USB storage devices, or
1029 * failed boot when the root file system is on a USB mass storage device and is
1030 * enumerated under EHCI first.
1031 *
1032 * We write into the xHC's PCI configuration space in some Intel-specific
1033 * registers to switch the ports over.  The USB 3.0 terminations and the USB
1034 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
1035 * terminations before switching the USB 2.0 wires over, so that USB 3.0
1036 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
1037 */
1038void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
1039{
1040	u32		ports_available;
1041	bool		ehci_found = false;
1042	struct pci_dev	*companion = NULL;
1043
1044	/* The Sony VAIO T-series with subsystem device ID 0x90a8 is not
1045	 * capable of switching ports from EHCI to xHCI.
1046	 */
1047	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
1048	    xhci_pdev->subsystem_device == 0x90a8)
1049		return;
1050
1051	/* Make sure an Intel EHCI controller exists */
1052	for_each_pci_dev(companion) {
1053		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
1054		    companion->vendor == PCI_VENDOR_ID_INTEL) {
1055			ehci_found = true;
1056			break;
1057		}
1058	}
1059
1060	if (!ehci_found)
1061		return;
1062
1063	/* Don't switch the ports over if the user hasn't compiled the xHCI
1064	 * driver.  Otherwise they will see "dead" USB ports that don't power
1065	 * the devices.
1066	 */
1067	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
1068		dev_warn(&xhci_pdev->dev,
1069			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
1070		dev_warn(&xhci_pdev->dev,
1071				"USB 3.0 devices will work at USB 2.0 speeds.\n");
1072		usb_disable_xhci_ports(xhci_pdev);
1073		return;
1074	}
1075
1076	/* Read USB3PRM, the USB 3.0 Port Routing Mask Register, which
1077	 * indicates the ports that can be switched over by the OS.
1078	 */
1079	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
1080			&ports_available);
1081
1082	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
1083			ports_available);
1084
1085	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
1086	 * Register, to turn on SuperSpeed terminations for the
1087	 * switchable ports.
1088	 */
1089	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
1090			ports_available);
1091
1092	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
1093			&ports_available);
1094	dev_dbg(&xhci_pdev->dev,
1095		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
1096		ports_available);
1097
1098	/* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register, which
1099	 * indicates the USB 2.0 ports that can be controlled by the xHCI host.
1100	 */
1101
1102	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
1103			&ports_available);
1104
1105	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
1106			ports_available);
1107
1108	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
1109	 * switch the USB 2.0 power and data lines over to the xHCI
1110	 * host.
1111	 */
1112	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
1113			ports_available);
1114
1115	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
1116			&ports_available);
1117	dev_dbg(&xhci_pdev->dev,
1118		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
1119		ports_available);
1120}
1121EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
1122
1123void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
1124{
1125	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
1126	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
1127}
1128EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
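
/*
 * Sketch of how an xHCI PCI probe path might use the two exports above
 * (illustrative only; "pdev" here stands for the xHCI controller's
 * pci_dev, mirroring what quirk_usb_handoff_xhci() does below):
 *
 *	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
 *		usb_enable_intel_xhci_ports(pdev);
 *
 * usb_disable_xhci_ports(pdev) does the reverse, handing the shared ports
 * back to EHCI, e.g. when xHCI support is unavailable.
 */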
1129
1130/**
1131 * PCI Quirks for xHCI.
1132 *
1133 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
1134 * It signals to the BIOS that the OS wants control of the host controller,
1135 * and then waits 1 second for the BIOS to hand over control.
1136 * If we time out, assume the BIOS is broken and take control anyway.
1137 */
1138static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
1139{
1140	void __iomem *base;
1141	int ext_cap_offset;
1142	void __iomem *op_reg_base;
1143	u32 val;
1144	int timeout;
1145	int len = pci_resource_len(pdev, 0);
1146
1147	if (!mmio_resource_enabled(pdev, 0))
1148		return;
1149
1150	base = ioremap_nocache(pci_resource_start(pdev, 0), len);
1151	if (base == NULL)
1152		return;
1153
1154	/*
1155	 * Find the Legacy Support Capability register -
1156	 * this is optional for xHCI host controllers.
1157	 */
1158	ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);
1159
1160	if (!ext_cap_offset)
1161		goto hc_init;
1162
1163	if ((ext_cap_offset + sizeof(val)) > len) {
1164		/* We're reading garbage from the controller */
1165		dev_warn(&pdev->dev, "xHCI controller failing to respond\n");
1166		goto iounmap;
1167	}
1168	val = readl(base + ext_cap_offset);
1169
1170	/* Auto handoff never worked for these devices. Force it and continue */
1171	if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
1172			(pdev->vendor == PCI_VENDOR_ID_RENESAS
1173			 && pdev->device == 0x0014)) {
1174		val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
1175		writel(val, base + ext_cap_offset);
1176	}
1177
1178	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
1179	if (val & XHCI_HC_BIOS_OWNED) {
1180		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
1181
1182		/* Wait for 1 second with 10 microsecond polling interval */
1183		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
1184				0, 1000000, 10);
1185
1186		/* Assume a buggy BIOS and take HC ownership anyway */
1187		if (timeout) {
1188			dev_warn(&pdev->dev,
1189				 "xHCI BIOS handoff failed (BIOS bug?) %08x\n",
1190				 val);
1191			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
1192		}
1193	}
1194
1195	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1196	/* Mask off (turn off) any enabled SMIs */
1197	val &= XHCI_LEGACY_DISABLE_SMI;
1198	/* Mask all SMI events bits, RW1C */
1199	val |= XHCI_LEGACY_SMI_EVENTS;
1200	/* Disable any BIOS SMIs and clear all SMI events */
1201	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1202
1203hc_init:
1204	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
1205		usb_enable_intel_xhci_ports(pdev);
1206
1207	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
1208
1209	/* Wait for the host controller to be ready before writing any
1210	 * operational or runtime registers.  Wait 5 seconds and no more.
1211	 */
1212	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
1213			5000000, 10);
1214	/* Assume a buggy HC and start HC initialization anyway */
1215	if (timeout) {
1216		val = readl(op_reg_base + XHCI_STS_OFFSET);
1217		dev_warn(&pdev->dev,
1218			 "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
1219			 val);
1220	}
1221
1222	/* Send the halt and disable interrupts command */
1223	val = readl(op_reg_base + XHCI_CMD_OFFSET);
1224	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
1225	writel(val, op_reg_base + XHCI_CMD_OFFSET);
1226
1227	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
1228	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
1229			XHCI_MAX_HALT_USEC, 125);
1230	if (timeout) {
1231		val = readl(op_reg_base + XHCI_STS_OFFSET);
1232		dev_warn(&pdev->dev,
1233			 "xHCI HW did not halt within %d usec status = 0x%x\n",
1234			 XHCI_MAX_HALT_USEC, val);
1235	}
1236
1237iounmap:
1238	iounmap(base);
1239}
1240
1241static void quirk_usb_early_handoff(struct pci_dev *pdev)
1242{
1243	/* Skip the Netlogic MIPS SoC's internal PCI USB controller.
1244	 * This device does not need/support EHCI/OHCI handoff.
1245	 */
1246	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
1247		return;
1248	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
1249			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
1250			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
1251			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
1252		return;
1253
1254	if (pci_enable_device(pdev) < 0) {
1255		dev_warn(&pdev->dev,
1256			 "Can't enable PCI device, BIOS handoff failed.\n");
1257		return;
1258	}
1259	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
1260		quirk_usb_handoff_uhci(pdev);
1261	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
1262		quirk_usb_handoff_ohci(pdev);
1263	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
1264		quirk_usb_disable_ehci(pdev);
1265	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
1266		quirk_usb_handoff_xhci(pdev);
1267	pci_disable_device(pdev);
1268}
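
/*
 * Register the handoff as a final-stage PCI fixup for any vendor/device
 * whose class matches PCI_CLASS_SERIAL_USB.  The class_shift of 8 makes
 * the match ignore the low 8 bits (the programming interface), so UHCI,
 * OHCI, EHCI and xHCI controllers are all covered.
 */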
1269DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1270			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
1271
1272bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
1273{
1274	/*
1275	 * Our dear uPD72020{1,2} friend only partially resets when
1276	 * asked to via the xHCI interface, and may end up doing DMA
1277	 * at the wrong addresses, as it keeps the top 32 bits of some
1278	 * addresses from its previous programming under obscure
1279	 * circumstances.
1280	 * Give it a good whack at probe time. Unfortunately, this
1281	 * needs to happen before we've had a chance to discover any
1282	 * quirk, or the system will be in a rather bad state.
1283	 */
1284	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
1285	    (pdev->device == 0x0014 || pdev->device == 0x0015))
1286		return true;
1287
1288	return false;
1289}
1290EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
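
/*
 * Sketch of the intended call pattern (illustrative, assuming the caller is
 * an xHCI PCI probe routine with the controller's pci_dev in hand):
 *
 *	if (usb_xhci_needs_pci_reset(pdev))
 *		pci_reset_function(pdev);
 *
 * i.e. the affected Renesas parts get a full PCI function reset before any
 * register-level setup, since their xHCI-level reset cannot be trusted.
 */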