v4.6
   1/*
   2 * This file contains code to reset and initialize USB host controllers.
   3 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
   4 * It may need to run early during booting -- before USB would normally
   5 * initialize -- to ensure that Linux doesn't use any legacy modes.
   6 *
   7 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
   8 *  (and others)
   9 */
  10
  11#include <linux/types.h>
  12#include <linux/kconfig.h>
  13#include <linux/kernel.h>
  14#include <linux/pci.h>
  15#include <linux/delay.h>
  16#include <linux/export.h>
  17#include <linux/acpi.h>
  18#include <linux/dmi.h>
  19#include "pci-quirks.h"
  20#include "xhci-ext-caps.h"
  21
  22
  23#define UHCI_USBLEGSUP		0xc0		/* legacy support */
  24#define UHCI_USBCMD		0		/* command register */
  25#define UHCI_USBINTR		4		/* interrupt register */
  26#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
  27#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
  28#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
  29#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
  30#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
  31#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
  32#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */
  33
  34#define OHCI_CONTROL		0x04
  35#define OHCI_CMDSTATUS		0x08
  36#define OHCI_INTRSTATUS		0x0c
  37#define OHCI_INTRENABLE		0x10
  38#define OHCI_INTRDISABLE	0x14
  39#define OHCI_FMINTERVAL		0x34
  40#define OHCI_HCFS		(3 << 6)	/* hc functional state */
  41#define OHCI_HCR		(1 << 0)	/* host controller reset */
  42#define OHCI_OCR		(1 << 3)	/* ownership change request */
  43#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
  44#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
  45#define OHCI_INTR_OC		(1 << 30)	/* ownership change */
  46
  47#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
  48#define EHCI_USBCMD		0		/* command register */
  49#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
  50#define EHCI_USBSTS		4		/* status register */
  51#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
  52#define EHCI_USBINTR		8		/* interrupt register */
  53#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
  54#define EHCI_USBLEGSUP		0		/* legacy support register */
  55#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
  56#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
  57#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
  58#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */
  59
  60/* AMD quirk use */
  61#define	AB_REG_BAR_LOW		0xe0
  62#define	AB_REG_BAR_HIGH		0xe1
  63#define	AB_REG_BAR_SB700	0xf0
  64#define	AB_INDX(addr)		((addr) + 0x00)
  65#define	AB_DATA(addr)		((addr) + 0x04)
  66#define	AX_INDXC		0x30
  67#define	AX_DATAC		0x34
  68
  69#define	NB_PCIE_INDX_ADDR	0xe0
  70#define	NB_PCIE_INDX_DATA	0xe4
  71#define	PCIE_P_CNTL		0x10040
  72#define	BIF_NB			0x10002
  73#define	NB_PIF0_PWRDOWN_0	0x01100012
  74#define	NB_PIF0_PWRDOWN_1	0x01100013
  75
  76#define USB_INTEL_XUSB2PR      0xD0
  77#define USB_INTEL_USB2PRM      0xD4
  78#define USB_INTEL_USB3_PSSEN   0xD8
  79#define USB_INTEL_USB3PRM      0xDC
  80
  81/*
   82 * amd_chipset_gen values represent different AMD chipset generations
  83 */
  84enum amd_chipset_gen {
  85	NOT_AMD_CHIPSET = 0,
  86	AMD_CHIPSET_SB600,
  87	AMD_CHIPSET_SB700,
  88	AMD_CHIPSET_SB800,
  89	AMD_CHIPSET_HUDSON2,
  90	AMD_CHIPSET_BOLTON,
  91	AMD_CHIPSET_YANGTZE,
  92	AMD_CHIPSET_UNKNOWN,
  93};
  94
  95struct amd_chipset_type {
  96	enum amd_chipset_gen gen;
  97	u8 rev;
  98};
  99
 100static struct amd_chipset_info {
 101	struct pci_dev	*nb_dev;
 102	struct pci_dev	*smbus_dev;
 103	int nb_type;
 104	struct amd_chipset_type sb_type;
 105	int isoc_reqs;
 106	int probe_count;
 107	int probe_result;
 108} amd_chipset;
 109
 110static DEFINE_SPINLOCK(amd_lock);
 111
 112/*
 113 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
 114 *
 115 * AMD FCH/SB generation and revision is identified by SMBus controller
 116 * vendor, device and revision IDs.
 117 *
 118 * Returns: 1 if it is an AMD chipset, 0 otherwise.
 119 */
 120static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
 121{
 122	u8 rev = 0;
 123	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
 124
 125	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
 126			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
 127	if (pinfo->smbus_dev) {
 128		rev = pinfo->smbus_dev->revision;
 129		if (rev >= 0x10 && rev <= 0x1f)
 130			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
 131		else if (rev >= 0x30 && rev <= 0x3f)
 132			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
 133		else if (rev >= 0x40 && rev <= 0x4f)
 134			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
 135	} else {
 136		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 137				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
 138
 139		if (!pinfo->smbus_dev) {
 140			pinfo->sb_type.gen = NOT_AMD_CHIPSET;
 141			return 0;
 142		}
 143
 144		rev = pinfo->smbus_dev->revision;
 145		if (rev >= 0x11 && rev <= 0x14)
 146			pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
 147		else if (rev >= 0x15 && rev <= 0x18)
 148			pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
 149		else if (rev >= 0x39 && rev <= 0x3a)
 150			pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
 151	}
 152
 153	pinfo->sb_type.rev = rev;
 154	return 1;
 155}
 156
 157void sb800_prefetch(struct device *dev, int on)
 158{
 159	u16 misc;
 160	struct pci_dev *pdev = to_pci_dev(dev);
 161
 162	pci_read_config_word(pdev, 0x50, &misc);
 163	if (on == 0)
 164		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
 165	else
 166		pci_write_config_word(pdev, 0x50, misc | 0x0300);
 167}
 168EXPORT_SYMBOL_GPL(sb800_prefetch);
 169
 170int usb_amd_find_chipset_info(void)
 171{
 172	unsigned long flags;
 173	struct amd_chipset_info info;
 174	int ret;
 175
 176	spin_lock_irqsave(&amd_lock, flags);
 177
 178	/* probe only once */
 179	if (amd_chipset.probe_count > 0) {
 180		amd_chipset.probe_count++;
 181		spin_unlock_irqrestore(&amd_lock, flags);
 182		return amd_chipset.probe_result;
 183	}
 184	memset(&info, 0, sizeof(info));
 185	spin_unlock_irqrestore(&amd_lock, flags);
 186
 187	if (!amd_chipset_sb_type_init(&info)) {
 188		ret = 0;
 189		goto commit;
 190	}
 191
  192	/* The chipset generations below do not need the AMD PLL quirk */
 193	if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
 194			info.sb_type.gen == AMD_CHIPSET_SB600 ||
 195			info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
 196			(info.sb_type.gen == AMD_CHIPSET_SB700 &&
 197			info.sb_type.rev > 0x3b)) {
 198		if (info.smbus_dev) {
 199			pci_dev_put(info.smbus_dev);
 200			info.smbus_dev = NULL;
 201		}
 202		ret = 0;
 203		goto commit;
 204	}
 205
 206	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
 207	if (info.nb_dev) {
 208		info.nb_type = 1;
 209	} else {
 210		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
 211		if (info.nb_dev) {
 212			info.nb_type = 2;
 213		} else {
 214			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 215						     0x9600, NULL);
 216			if (info.nb_dev)
 217				info.nb_type = 3;
 218		}
 219	}
 220
 221	ret = info.probe_result = 1;
 222	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 223
 224commit:
 225
 226	spin_lock_irqsave(&amd_lock, flags);
 227	if (amd_chipset.probe_count > 0) {
 228		/* race - someone else was faster - drop devices */
 229
  230		/* Mark that we were here */
 231		amd_chipset.probe_count++;
 232		ret = amd_chipset.probe_result;
 233
 234		spin_unlock_irqrestore(&amd_lock, flags);
 235
 236		pci_dev_put(info.nb_dev);
 237		pci_dev_put(info.smbus_dev);
 238
 239	} else {
 240		/* no race - commit the result */
 241		info.probe_count++;
 242		amd_chipset = info;
 243		spin_unlock_irqrestore(&amd_lock, flags);
 244	}
 245
 246	return ret;
 247}
 248EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
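/*
 * Usage sketch (illustrative, not a definitive call site): a PCI host
 * controller driver would typically probe for the quirk once and balance
 * it with usb_amd_dev_put() on teardown, along these lines:
 *
 *	if (usb_amd_find_chipset_info())
 *		priv->amd_pll_fix = 1;		(field name hypothetical)
 *	...
 *	usb_amd_dev_put();			(on remove; drops the
 *						 nb_dev/smbus_dev references)
 */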
 249
 250int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
 251{
 252	/* Make sure amd chipset type has already been initialized */
 253	usb_amd_find_chipset_info();
 254	if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
 255		return 0;
 256
 257	dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
 258	return 1;
 259}
 260EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
 261
 262bool usb_amd_hang_symptom_quirk(void)
 263{
 264	u8 rev;
 265
 266	usb_amd_find_chipset_info();
 267	rev = amd_chipset.sb_type.rev;
 268	/* SB600 and old version of SB700 have hang symptom bug */
 269	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
 270			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 271			 rev >= 0x3a && rev <= 0x3b);
 272}
 273EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
 274
 275bool usb_amd_prefetch_quirk(void)
 276{
 277	usb_amd_find_chipset_info();
 278	/* SB800 needs pre-fetch fix */
 279	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
 280}
 281EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
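/*
 * Usage sketch: the two prefetch helpers above are meant to be combined,
 * roughly as an OHCI driver might do (the surrounding code here is
 * illustrative):
 *
 *	if (usb_amd_prefetch_quirk())
 *		ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
 *	...
 *	sb800_prefetch(dev, 0);		(turn prefetch off around the
 *					 access that trips the erratum)
 *	...touch the descriptor...
 *	sb800_prefetch(dev, 1);		(turn it back on)
 */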
 282
 283/*
 284 * The hardware normally enables the A-link power management feature, which
 285 * lets the system lower the power consumption in idle states.
 286 *
 287 * This USB quirk prevents the link going into that lower power state
 288 * during isochronous transfers.
 289 *
 290 * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of
 291 * some AMD platforms may stutter or have breaks occasionally.
 292 */
 293static void usb_amd_quirk_pll(int disable)
 294{
 295	u32 addr, addr_low, addr_high, val;
 296	u32 bit = disable ? 0 : 1;
 297	unsigned long flags;
 298
 299	spin_lock_irqsave(&amd_lock, flags);
 300
 301	if (disable) {
 302		amd_chipset.isoc_reqs++;
 303		if (amd_chipset.isoc_reqs > 1) {
 304			spin_unlock_irqrestore(&amd_lock, flags);
 305			return;
 306		}
 307	} else {
 308		amd_chipset.isoc_reqs--;
 309		if (amd_chipset.isoc_reqs > 0) {
 310			spin_unlock_irqrestore(&amd_lock, flags);
 311			return;
 312		}
 313	}
 314
 315	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
 316			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
 317			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
 318		outb_p(AB_REG_BAR_LOW, 0xcd6);
 319		addr_low = inb_p(0xcd7);
 320		outb_p(AB_REG_BAR_HIGH, 0xcd6);
 321		addr_high = inb_p(0xcd7);
 322		addr = addr_high << 8 | addr_low;
 323
 324		outl_p(0x30, AB_INDX(addr));
 325		outl_p(0x40, AB_DATA(addr));
 326		outl_p(0x34, AB_INDX(addr));
 327		val = inl_p(AB_DATA(addr));
 328	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 329			amd_chipset.sb_type.rev <= 0x3b) {
 330		pci_read_config_dword(amd_chipset.smbus_dev,
 331					AB_REG_BAR_SB700, &addr);
 332		outl(AX_INDXC, AB_INDX(addr));
 333		outl(0x40, AB_DATA(addr));
 334		outl(AX_DATAC, AB_INDX(addr));
 335		val = inl(AB_DATA(addr));
 336	} else {
 337		spin_unlock_irqrestore(&amd_lock, flags);
 338		return;
 339	}
 340
 341	if (disable) {
 342		val &= ~0x08;
 343		val |= (1 << 4) | (1 << 9);
 344	} else {
 345		val |= 0x08;
 346		val &= ~((1 << 4) | (1 << 9));
 347	}
 348	outl_p(val, AB_DATA(addr));
 349
 350	if (!amd_chipset.nb_dev) {
 351		spin_unlock_irqrestore(&amd_lock, flags);
 352		return;
 353	}
 354
 355	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
 356		addr = PCIE_P_CNTL;
 357		pci_write_config_dword(amd_chipset.nb_dev,
 358					NB_PCIE_INDX_ADDR, addr);
 359		pci_read_config_dword(amd_chipset.nb_dev,
 360					NB_PCIE_INDX_DATA, &val);
 361
 362		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
 363		val |= bit | (bit << 3) | (bit << 12);
 364		val |= ((!bit) << 4) | ((!bit) << 9);
 365		pci_write_config_dword(amd_chipset.nb_dev,
 366					NB_PCIE_INDX_DATA, val);
 367
 368		addr = BIF_NB;
 369		pci_write_config_dword(amd_chipset.nb_dev,
 370					NB_PCIE_INDX_ADDR, addr);
 371		pci_read_config_dword(amd_chipset.nb_dev,
 372					NB_PCIE_INDX_DATA, &val);
 373		val &= ~(1 << 8);
 374		val |= bit << 8;
 375
 376		pci_write_config_dword(amd_chipset.nb_dev,
 377					NB_PCIE_INDX_DATA, val);
 378	} else if (amd_chipset.nb_type == 2) {
 379		addr = NB_PIF0_PWRDOWN_0;
 380		pci_write_config_dword(amd_chipset.nb_dev,
 381					NB_PCIE_INDX_ADDR, addr);
 382		pci_read_config_dword(amd_chipset.nb_dev,
 383					NB_PCIE_INDX_DATA, &val);
 384		if (disable)
 385			val &= ~(0x3f << 7);
 386		else
 387			val |= 0x3f << 7;
 388
 389		pci_write_config_dword(amd_chipset.nb_dev,
 390					NB_PCIE_INDX_DATA, val);
 391
 392		addr = NB_PIF0_PWRDOWN_1;
 393		pci_write_config_dword(amd_chipset.nb_dev,
 394					NB_PCIE_INDX_ADDR, addr);
 395		pci_read_config_dword(amd_chipset.nb_dev,
 396					NB_PCIE_INDX_DATA, &val);
 397		if (disable)
 398			val &= ~(0x3f << 7);
 399		else
 400			val |= 0x3f << 7;
 401
 402		pci_write_config_dword(amd_chipset.nb_dev,
 403					NB_PCIE_INDX_DATA, val);
 404	}
 405
 406	spin_unlock_irqrestore(&amd_lock, flags);
 407	return;
 408}
 409
 410void usb_amd_quirk_pll_disable(void)
 411{
 412	usb_amd_quirk_pll(1);
 413}
 414EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
 415
 416void usb_amd_quirk_pll_enable(void)
 417{
 418	usb_amd_quirk_pll(0);
 419}
 420EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
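/*
 * Usage sketch: the disable/enable pair is reference counted through
 * isoc_reqs, so a host driver can simply bracket isochronous activity
 * (callers illustrative):
 *
 *	usb_amd_quirk_pll_disable();	before the first isoc stream starts
 *	...run isochronous transfers...
 *	usb_amd_quirk_pll_enable();	after the last isoc stream stops
 *
 * Only the first disable and the last enable actually touch the A-link
 * PM registers above.
 */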
 421
 422void usb_amd_dev_put(void)
 423{
 424	struct pci_dev *nb, *smbus;
 425	unsigned long flags;
 426
 427	spin_lock_irqsave(&amd_lock, flags);
 428
 429	amd_chipset.probe_count--;
 430	if (amd_chipset.probe_count > 0) {
 431		spin_unlock_irqrestore(&amd_lock, flags);
 432		return;
 433	}
 434
 435	/* save them to pci_dev_put outside of spinlock */
 436	nb    = amd_chipset.nb_dev;
 437	smbus = amd_chipset.smbus_dev;
 438
 439	amd_chipset.nb_dev = NULL;
 440	amd_chipset.smbus_dev = NULL;
 441	amd_chipset.nb_type = 0;
 442	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
 443	amd_chipset.isoc_reqs = 0;
 444	amd_chipset.probe_result = 0;
 445
 446	spin_unlock_irqrestore(&amd_lock, flags);
 447
 448	pci_dev_put(nb);
 449	pci_dev_put(smbus);
 450}
 451EXPORT_SYMBOL_GPL(usb_amd_dev_put);
 452
 453/*
 454 * Make sure the controller is completely inactive, unable to
 455 * generate interrupts or do DMA.
 456 */
 457void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
 458{
 459	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
 460	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
 461	 */
 462	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
 463
 464	/* Reset the HC - this will force us to get a
 465	 * new notification of any already connected
 466	 * ports due to the virtual disconnect that it
 467	 * implies.
 468	 */
 469	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
 470	mb();
 471	udelay(5);
 472	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
 473		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
 474
 475	/* Just to be safe, disable interrupt requests and
 476	 * make sure the controller is stopped.
 477	 */
 478	outw(0, base + UHCI_USBINTR);
 479	outw(0, base + UHCI_USBCMD);
 480}
 481EXPORT_SYMBOL_GPL(uhci_reset_hc);
 482
 483/*
 484 * Initialize a controller that was newly discovered or has just been
 485 * resumed.  In either case we can't be sure of its previous state.
 486 *
 487 * Returns: 1 if the controller was reset, 0 otherwise.
 488 */
 489int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
 490{
 491	u16 legsup;
 492	unsigned int cmd, intr;
 493
 494	/*
 495	 * When restarting a suspended controller, we expect all the
 496	 * settings to be the same as we left them:
 497	 *
 498	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
 499	 *	Controller is stopped and configured with EGSM set;
 500	 *	No interrupts enabled except possibly Resume Detect.
 501	 *
 502	 * If any of these conditions are violated we do a complete reset.
 503	 */
 504	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
 505	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
 506		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
 507				__func__, legsup);
 508		goto reset_needed;
 509	}
 510
 511	cmd = inw(base + UHCI_USBCMD);
 512	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
 513			!(cmd & UHCI_USBCMD_EGSM)) {
 514		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
 515				__func__, cmd);
 516		goto reset_needed;
 517	}
 518
 519	intr = inw(base + UHCI_USBINTR);
 520	if (intr & (~UHCI_USBINTR_RESUME)) {
 521		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
 522				__func__, intr);
 523		goto reset_needed;
 524	}
 525	return 0;
 526
 527reset_needed:
 528	dev_dbg(&pdev->dev, "Performing full reset\n");
 529	uhci_reset_hc(pdev, base);
 530	return 1;
 531}
 532EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
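/*
 * Usage sketch: besides the early handoff below, the UHCI driver can call
 * this when it starts or resumes a controller whose prior state is
 * unknown; something like (illustrative):
 *
 *	if (uhci_check_and_reset_hc(pdev, uhci->io_addr))
 *		;	a full reset happened, so expect fresh connect
 *			events on all ports
 */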
 533
 534static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
 535{
 536	u16 cmd;
 537	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
 538}
 539
 540#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
 541#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
 542
 543static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
 544{
 545	unsigned long base = 0;
 546	int i;
 547
 548	if (!pio_enabled(pdev))
 549		return;
 550
 551	for (i = 0; i < PCI_ROM_RESOURCE; i++)
 552		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
 553			base = pci_resource_start(pdev, i);
 554			break;
 555		}
 556
 557	if (base)
 558		uhci_check_and_reset_hc(pdev, base);
 559}
 560
 561static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
 562{
 563	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
 564}
 565
 566static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
 567{
 568	void __iomem *base;
 569	u32 control;
 570	u32 fminterval = 0;
 571	bool no_fminterval = false;
 572	int cnt;
 573
 574	if (!mmio_resource_enabled(pdev, 0))
 575		return;
 576
 577	base = pci_ioremap_bar(pdev, 0);
 578	if (base == NULL)
 579		return;
 580
 581	/*
 582	 * ULi M5237 OHCI controller locks the whole system when accessing
 583	 * the OHCI_FMINTERVAL offset.
 584	 */
 585	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
 586		no_fminterval = true;
 587
 588	control = readl(base + OHCI_CONTROL);
 589
 590/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
 591#ifdef __hppa__
 592#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
 593#else
 594#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC
 595
 596	if (control & OHCI_CTRL_IR) {
 597		int wait_time = 500; /* arbitrary; 5 seconds */
 598		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
 599		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
 600		while (wait_time > 0 &&
 601				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
 602			wait_time -= 10;
 603			msleep(10);
 604		}
 605		if (wait_time <= 0)
 606			dev_warn(&pdev->dev,
 607				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 608				 readl(base + OHCI_CONTROL));
 609	}
 610#endif
 611
 612	/* disable interrupts */
 613	writel((u32) ~0, base + OHCI_INTRDISABLE);
 614
 615	/* Reset the USB bus, if the controller isn't already in RESET */
 616	if (control & OHCI_HCFS) {
 617		/* Go into RESET, preserving RWC (and possibly IR) */
 618		writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
 619		readl(base + OHCI_CONTROL);
 620
 621		/* drive bus reset for at least 50 ms (7.1.7.5) */
 622		msleep(50);
 623	}
 624
 625	/* software reset of the controller, preserving HcFmInterval */
 626	if (!no_fminterval)
 627		fminterval = readl(base + OHCI_FMINTERVAL);
 628
 629	writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 630
 631	/* reset requires max 10 us delay */
 632	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
 633		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
 634			break;
 635		udelay(1);
 636	}
 637
 638	if (!no_fminterval)
 639		writel(fminterval, base + OHCI_FMINTERVAL);
 640
 641	/* Now the controller is safely in SUSPEND and nothing can wake it up */
 642	iounmap(base);
 643}
 644
 645static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
 646	{
 647		/*  Pegatron Lucid (ExoPC) */
 648		.matches = {
 649			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
 650			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
 651		},
 652	},
 653	{
 654		/*  Pegatron Lucid (Ordissimo AIRIS) */
 655		.matches = {
 656			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
 657			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 658		},
 659	},
 660	{
 661		/*  Pegatron Lucid (Ordissimo) */
 662		.matches = {
 663			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
 664			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 665		},
 666	},
 667	{
 668		/* HASEE E200 */
 669		.matches = {
 670			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
 671			DMI_MATCH(DMI_BOARD_NAME, "E210"),
 672			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
 673		},
 674	},
 675	{ }
 676};
 677
 678static void ehci_bios_handoff(struct pci_dev *pdev,
 679					void __iomem *op_reg_base,
 680					u32 cap, u8 offset)
 681{
 682	int try_handoff = 1, tried_handoff = 0;
 683
 684	/*
 685	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
 686	 * the handoff on its unused controller.  Skip it.
 687	 *
 688	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
 689	 */
 690	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
 691			pdev->device == 0x27cc)) {
 692		if (dmi_check_system(ehci_dmi_nohandoff_table))
 693			try_handoff = 0;
 694	}
 695
 696	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
 697		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
 698
 699#if 0
 700/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 701 * but that seems dubious in general (the BIOS left it off intentionally)
 702 * and is known to prevent some systems from booting.  so we won't do this
 703 * unless maybe we can determine when we're on a system that needs SMI forced.
 704 */
 705		/* BIOS workaround (?): be sure the pre-Linux code
 706		 * receives the SMI
 707		 */
 708		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
 709		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
 710				       val | EHCI_USBLEGCTLSTS_SOOE);
 711#endif
 712
 713		/* some systems get upset if this semaphore is
 714		 * set for any other reason than forcing a BIOS
 715		 * handoff..
 716		 */
 717		pci_write_config_byte(pdev, offset + 3, 1);
 718	}
 719
 720	/* if boot firmware now owns EHCI, spin till it hands it over. */
 721	if (try_handoff) {
 722		int msec = 1000;
 723		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
 724			tried_handoff = 1;
 725			msleep(10);
 726			msec -= 10;
 727			pci_read_config_dword(pdev, offset, &cap);
 728		}
 729	}
 730
 731	if (cap & EHCI_USBLEGSUP_BIOS) {
 732		/* well, possibly buggy BIOS... try to shut it down,
 733		 * and hope nothing goes too wrong
 734		 */
 735		if (try_handoff)
 736			dev_warn(&pdev->dev,
 737				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 738				 cap);
 739		pci_write_config_byte(pdev, offset + 2, 0);
 740	}
 741
 742	/* just in case, always disable EHCI SMIs */
 743	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
 744
 745	/* If the BIOS ever owned the controller then we can't expect
 746	 * any power sessions to remain intact.
 747	 */
 748	if (tried_handoff)
 749		writel(0, op_reg_base + EHCI_CONFIGFLAG);
 750}
 751
 752static void quirk_usb_disable_ehci(struct pci_dev *pdev)
 753{
 754	void __iomem *base, *op_reg_base;
 755	u32	hcc_params, cap, val;
 756	u8	offset, cap_length;
 757	int	wait_time, count = 256/4;
 758
 759	if (!mmio_resource_enabled(pdev, 0))
 760		return;
 761
 762	base = pci_ioremap_bar(pdev, 0);
 763	if (base == NULL)
 764		return;
 765
 766	cap_length = readb(base);
 767	op_reg_base = base + cap_length;
 768
  769	/* EHCI 0.96 and later may have "extended capabilities";
  770	 * spec section 5.1 explains the BIOS handoff, e.g. for
  771	 * booting from a USB disk or using a USB keyboard.
 772	 */
 773	hcc_params = readl(base + EHCI_HCC_PARAMS);
 774	offset = (hcc_params >> 8) & 0xff;
 775	while (offset && --count) {
 776		pci_read_config_dword(pdev, offset, &cap);
 777
 778		switch (cap & 0xff) {
 779		case 1:
 780			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
 781			break;
 782		case 0: /* Illegal reserved cap, set cap=0 so we exit */
 783			cap = 0; /* then fallthrough... */
 784		default:
 785			dev_warn(&pdev->dev,
 786				 "EHCI: unrecognized capability %02x\n",
 787				 cap & 0xff);
 788		}
 789		offset = (cap >> 8) & 0xff;
 790	}
 791	if (!count)
 792		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
 793
 794	/*
 795	 * halt EHCI & disable its interrupts in any case
 796	 */
 797	val = readl(op_reg_base + EHCI_USBSTS);
 798	if ((val & EHCI_USBSTS_HALTED) == 0) {
 799		val = readl(op_reg_base + EHCI_USBCMD);
 800		val &= ~EHCI_USBCMD_RUN;
 801		writel(val, op_reg_base + EHCI_USBCMD);
 802
 803		wait_time = 2000;
 804		do {
 805			writel(0x3f, op_reg_base + EHCI_USBSTS);
 806			udelay(100);
 807			wait_time -= 100;
 808			val = readl(op_reg_base + EHCI_USBSTS);
 809			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
 810				break;
 811			}
 812		} while (wait_time > 0);
 813	}
 814	writel(0, op_reg_base + EHCI_USBINTR);
 815	writel(0x3f, op_reg_base + EHCI_USBSTS);
 816
 817	iounmap(base);
 818}
 819
 820/*
 821 * handshake - spin reading a register until handshake completes
 822 * @ptr: address of hc register to be read
 823 * @mask: bits to look at in result of read
 824 * @done: value of those bits when handshake succeeds
 825 * @wait_usec: timeout in microseconds
 826 * @delay_usec: delay in microseconds to wait between polling
 827 *
 828 * Polls a register every delay_usec microseconds.
 829 * Returns 0 when the mask bits have the value done.
 830 * Returns -ETIMEDOUT if this condition is not true after
 831 * wait_usec microseconds have passed.
 832 */
 833static int handshake(void __iomem *ptr, u32 mask, u32 done,
 834		int wait_usec, int delay_usec)
 835{
 836	u32	result;
 837
 838	do {
 839		result = readl(ptr);
 840		result &= mask;
 841		if (result == done)
 842			return 0;
 843		udelay(delay_usec);
 844		wait_usec -= delay_usec;
 845	} while (wait_usec > 0);
 846	return -ETIMEDOUT;
 847}
 848
 849/*
 850 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
 851 * share some number of ports.  These ports can be switched between either
 852 * controller.  Not all of the ports under the EHCI host controller may be
 853 * switchable.
 854 *
 855 * The ports should be switched over to xHCI before PCI probes for any device
 856 * start.  This avoids active devices under EHCI being disconnected during the
 857 * port switchover, which could cause loss of data on USB storage devices, or
 858 * failed boot when the root file system is on a USB mass storage device and is
 859 * enumerated under EHCI first.
 860 *
 861 * We write into the xHC's PCI configuration space in some Intel-specific
 862 * registers to switch the ports over.  The USB 3.0 terminations and the USB
 863 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
 864 * terminations before switching the USB 2.0 wires over, so that USB 3.0
 865 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
 866 */
 867void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
 868{
 869	u32		ports_available;
 870	bool		ehci_found = false;
 871	struct pci_dev	*companion = NULL;
 872
 873	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
 874	 * switching ports from EHCI to xHCI
 875	 */
 876	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
 877	    xhci_pdev->subsystem_device == 0x90a8)
 878		return;
 879
 880	/* make sure an intel EHCI controller exists */
 881	for_each_pci_dev(companion) {
 882		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
 883		    companion->vendor == PCI_VENDOR_ID_INTEL) {
 884			ehci_found = true;
 885			break;
 886		}
 887	}
 888
 889	if (!ehci_found)
 890		return;
 891
 892	/* Don't switchover the ports if the user hasn't compiled the xHCI
 893	 * driver.  Otherwise they will see "dead" USB ports that don't power
 894	 * the devices.
 895	 */
 896	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
 897		dev_warn(&xhci_pdev->dev,
 898			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
 899		dev_warn(&xhci_pdev->dev,
 900				"USB 3.0 devices will work at USB 2.0 speeds.\n");
 901		usb_disable_xhci_ports(xhci_pdev);
 902		return;
 903	}
 904
 905	/* Read USB3PRM, the USB 3.0 Port Routing Mask Register
  906	 * It indicates which ports the OS is allowed to switch over.
 907	 */
 908	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
 909			&ports_available);
 910
 911	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
 912			ports_available);
 913
 914	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
 915	 * Register, to turn on SuperSpeed terminations for the
 916	 * switchable ports.
 917	 */
 918	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
 919			ports_available);
 920
 921	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
 922			&ports_available);
 923	dev_dbg(&xhci_pdev->dev,
 924		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
 925		ports_available);
 926
 927	/* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register
 928	 * Indicate the USB 2.0 ports to be controlled by the xHCI host.
 929	 */
 930
 931	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
 932			&ports_available);
 933
  934	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
 935			ports_available);
 936
 937	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
 938	 * switch the USB 2.0 power and data lines over to the xHCI
 939	 * host.
 940	 */
 941	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
 942			ports_available);
 943
 944	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
 945			&ports_available);
 946	dev_dbg(&xhci_pdev->dev,
 947		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
 948		ports_available);
 949}
 950EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
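/*
 * Usage sketch: besides the boot-time handoff below, an xHCI PCI driver
 * would typically repeat this call on resume, since firmware may have
 * routed the shared ports back to EHCI while the system was suspended
 * (illustrative, not a definitive call site):
 *
 *	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
 *		usb_enable_intel_xhci_ports(pdev);
 */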
 951
 952void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
 953{
 954	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
 955	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
 956}
 957EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
 958
 959/**
 960 * PCI Quirks for xHCI.
 961 *
 962 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
 963 * It signals to the BIOS that the OS wants control of the host controller,
 964 * and then waits 5 seconds for the BIOS to hand over control.
 965 * If we timeout, assume the BIOS is broken and take control anyway.
 966 */
 967static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
 968{
 969	void __iomem *base;
 970	int ext_cap_offset;
 971	void __iomem *op_reg_base;
 972	u32 val;
 973	int timeout;
 974	int len = pci_resource_len(pdev, 0);
 975
 976	if (!mmio_resource_enabled(pdev, 0))
 977		return;
 978
 979	base = ioremap_nocache(pci_resource_start(pdev, 0), len);
 980	if (base == NULL)
 981		return;
 982
 983	/*
 984	 * Find the Legacy Support Capability register -
 985	 * this is optional for xHCI host controllers.
 986	 */
 987	ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);
 988
 989	if (!ext_cap_offset)
 990		goto hc_init;
 991
 992	if ((ext_cap_offset + sizeof(val)) > len) {
 993		/* We're reading garbage from the controller */
 994		dev_warn(&pdev->dev, "xHCI controller failing to respond");
 995		goto iounmap;
 996	}
 997	val = readl(base + ext_cap_offset);
 998
 999	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
1000	if (val & XHCI_HC_BIOS_OWNED) {
1001		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
1002
1003		/* Wait for 5 seconds with 10 microsecond polling interval */
1004		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
1005				0, 5000, 10);
1006
1007		/* Assume a buggy BIOS and take HC ownership anyway */
1008		if (timeout) {
1009			dev_warn(&pdev->dev,
1010				 "xHCI BIOS handoff failed (BIOS bug ?) %08x\n",
1011				 val);
1012			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
1013		}
1014	}
1015
1016	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1017	/* Mask off (turn off) any enabled SMIs */
1018	val &= XHCI_LEGACY_DISABLE_SMI;
1019	/* Mask all SMI events bits, RW1C */
1020	val |= XHCI_LEGACY_SMI_EVENTS;
1021	/* Disable any BIOS SMIs and clear all SMI events*/
1022	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1023
1024hc_init:
1025	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
1026		usb_enable_intel_xhci_ports(pdev);
1027
1028	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
1029
1030	/* Wait for the host controller to be ready before writing any
1031	 * operational or runtime registers.  Wait 5 seconds and no more.
1032	 */
1033	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
1034			5000, 10);
1035	/* Assume a buggy HC and start HC initialization anyway */
1036	if (timeout) {
1037		val = readl(op_reg_base + XHCI_STS_OFFSET);
1038		dev_warn(&pdev->dev,
1039			 "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
1040			 val);
1041	}
1042
1043	/* Send the halt and disable interrupts command */
1044	val = readl(op_reg_base + XHCI_CMD_OFFSET);
1045	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
1046	writel(val, op_reg_base + XHCI_CMD_OFFSET);
1047
1048	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
1049	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
1050			XHCI_MAX_HALT_USEC, 125);
1051	if (timeout) {
1052		val = readl(op_reg_base + XHCI_STS_OFFSET);
1053		dev_warn(&pdev->dev,
1054			 "xHCI HW did not halt within %d usec status = 0x%x\n",
1055			 XHCI_MAX_HALT_USEC, val);
1056	}
1057
1058iounmap:
1059	iounmap(base);
1060}
1061
1062static void quirk_usb_early_handoff(struct pci_dev *pdev)
1063{
1064	/* Skip Netlogic mips SoC's internal PCI USB controller.
1065	 * This device does not need/support EHCI/OHCI handoff
1066	 */
1067	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
1068		return;
1069	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
1070			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
1071			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
1072			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
1073		return;
1074
1075	if (pci_enable_device(pdev) < 0) {
1076		dev_warn(&pdev->dev,
1077			 "Can't enable PCI device, BIOS handoff failed.\n");
1078		return;
1079	}
1080	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
1081		quirk_usb_handoff_uhci(pdev);
1082	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
1083		quirk_usb_handoff_ohci(pdev);
1084	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
1085		quirk_usb_disable_ehci(pdev);
1086	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
1087		quirk_usb_handoff_xhci(pdev);
1088	pci_disable_device(pdev);
1089}
1090DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1091			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
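/*
 * Note on the fixup declaration above: the class is compared after a
 * right shift of 8 bits, so PCI_CLASS_SERIAL_USB matches any programming
 * interface (UHCI/OHCI/EHCI/xHCI alike); quirk_usb_early_handoff() then
 * narrows the match itself by checking pdev->class.
 */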
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * This file contains code to reset and initialize USB host controllers.
   4 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
   5 * It may need to run early during booting -- before USB would normally
   6 * initialize -- to ensure that Linux doesn't use any legacy modes.
   7 *
   8 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
   9 *  (and others)
  10 */
  11
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/pci.h>
  15#include <linux/delay.h>
  16#include <linux/export.h>
  17#include <linux/acpi.h>
  18#include <linux/dmi.h>
  19#include "pci-quirks.h"
  20#include "xhci-ext-caps.h"
  21
  22
  23#define UHCI_USBLEGSUP		0xc0		/* legacy support */
  24#define UHCI_USBCMD		0		/* command register */
  25#define UHCI_USBINTR		4		/* interrupt register */
  26#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
  27#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
  28#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
  29#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
  30#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
  31#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
  32#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */
  33
  34#define OHCI_CONTROL		0x04
  35#define OHCI_CMDSTATUS		0x08
  36#define OHCI_INTRSTATUS		0x0c
  37#define OHCI_INTRENABLE		0x10
  38#define OHCI_INTRDISABLE	0x14
  39#define OHCI_FMINTERVAL		0x34
  40#define OHCI_HCFS		(3 << 6)	/* hc functional state */
  41#define OHCI_HCR		(1 << 0)	/* host controller reset */
  42#define OHCI_OCR		(1 << 3)	/* ownership change request */
  43#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
  44#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
  45#define OHCI_INTR_OC		(1 << 30)	/* ownership change */
  46
  47#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
  48#define EHCI_USBCMD		0		/* command register */
  49#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
  50#define EHCI_USBSTS		4		/* status register */
  51#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
  52#define EHCI_USBINTR		8		/* interrupt register */
  53#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
  54#define EHCI_USBLEGSUP		0		/* legacy support register */
  55#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
  56#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
  57#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
  58#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */
  59
  60/* AMD quirk use */
  61#define	AB_REG_BAR_LOW		0xe0
  62#define	AB_REG_BAR_HIGH		0xe1
  63#define	AB_REG_BAR_SB700	0xf0
  64#define	AB_INDX(addr)		((addr) + 0x00)
  65#define	AB_DATA(addr)		((addr) + 0x04)
  66#define	AX_INDXC		0x30
  67#define	AX_DATAC		0x34
  68
  69#define PT_ADDR_INDX		0xE8
  70#define PT_READ_INDX		0xE4
  71#define PT_SIG_1_ADDR		0xA520
  72#define PT_SIG_2_ADDR		0xA521
  73#define PT_SIG_3_ADDR		0xA522
  74#define PT_SIG_4_ADDR		0xA523
  75#define PT_SIG_1_DATA		0x78
  76#define PT_SIG_2_DATA		0x56
  77#define PT_SIG_3_DATA		0x34
  78#define PT_SIG_4_DATA		0x12
  79#define PT4_P1_REG		0xB521
  80#define PT4_P2_REG		0xB522
  81#define PT2_P1_REG		0xD520
  82#define PT2_P2_REG		0xD521
  83#define PT1_P1_REG		0xD522
  84#define PT1_P2_REG		0xD523
  85
  86#define	NB_PCIE_INDX_ADDR	0xe0
  87#define	NB_PCIE_INDX_DATA	0xe4
  88#define	PCIE_P_CNTL		0x10040
  89#define	BIF_NB			0x10002
  90#define	NB_PIF0_PWRDOWN_0	0x01100012
  91#define	NB_PIF0_PWRDOWN_1	0x01100013
  92
  93#define USB_INTEL_XUSB2PR      0xD0
  94#define USB_INTEL_USB2PRM      0xD4
  95#define USB_INTEL_USB3_PSSEN   0xD8
  96#define USB_INTEL_USB3PRM      0xDC
  97
  98/* ASMEDIA quirk use */
  99#define ASMT_DATA_WRITE0_REG	0xF8
 100#define ASMT_DATA_WRITE1_REG	0xFC
 101#define ASMT_CONTROL_REG	0xE0
 102#define ASMT_CONTROL_WRITE_BIT	0x02
 103#define ASMT_WRITEREG_CMD	0x10423
 104#define ASMT_FLOWCTL_ADDR	0xFA30
 105#define ASMT_FLOWCTL_DATA	0xBA
 106#define ASMT_PSEUDO_DATA	0
 107
 108/*
  109 * amd_chipset_gen values represent different AMD chipset generations
 110 */
 111enum amd_chipset_gen {
 112	NOT_AMD_CHIPSET = 0,
 113	AMD_CHIPSET_SB600,
 114	AMD_CHIPSET_SB700,
 115	AMD_CHIPSET_SB800,
 116	AMD_CHIPSET_HUDSON2,
 117	AMD_CHIPSET_BOLTON,
 118	AMD_CHIPSET_YANGTZE,
 119	AMD_CHIPSET_TAISHAN,
 120	AMD_CHIPSET_UNKNOWN,
 121};
 122
 123struct amd_chipset_type {
 124	enum amd_chipset_gen gen;
 125	u8 rev;
 126};
 127
 128static struct amd_chipset_info {
 129	struct pci_dev	*nb_dev;
 130	struct pci_dev	*smbus_dev;
 131	int nb_type;
 132	struct amd_chipset_type sb_type;
 133	int isoc_reqs;
 134	int probe_count;
 135	bool need_pll_quirk;
 136} amd_chipset;
 137
 138static DEFINE_SPINLOCK(amd_lock);
 139
 140/*
 141 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
 142 *
 143 * AMD FCH/SB generation and revision is identified by SMBus controller
 144 * vendor, device and revision IDs.
 145 *
 146 * Returns: 1 if it is an AMD chipset, 0 otherwise.
 147 */
 148static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
 149{
 150	u8 rev = 0;
 151	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
 152
 153	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
 154			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
 155	if (pinfo->smbus_dev) {
 156		rev = pinfo->smbus_dev->revision;
 157		if (rev >= 0x10 && rev <= 0x1f)
 158			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
 159		else if (rev >= 0x30 && rev <= 0x3f)
 160			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
 161		else if (rev >= 0x40 && rev <= 0x4f)
 162			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
 163	} else {
 164		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 165				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
 166
 167		if (pinfo->smbus_dev) {
 168			rev = pinfo->smbus_dev->revision;
 169			if (rev >= 0x11 && rev <= 0x14)
 170				pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
 171			else if (rev >= 0x15 && rev <= 0x18)
 172				pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
 173			else if (rev >= 0x39 && rev <= 0x3a)
 174				pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
 175		} else {
 176			pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 177							  0x145c, NULL);
 178			if (pinfo->smbus_dev) {
 179				rev = pinfo->smbus_dev->revision;
 180				pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
 181			} else {
 182				pinfo->sb_type.gen = NOT_AMD_CHIPSET;
 183				return 0;
 184			}
 185		}
 186	}
 187	pinfo->sb_type.rev = rev;
 188	return 1;
 189}
 190
 191void sb800_prefetch(struct device *dev, int on)
 192{
 193	u16 misc;
 194	struct pci_dev *pdev = to_pci_dev(dev);
 195
 196	pci_read_config_word(pdev, 0x50, &misc);
 197	if (on == 0)
 198		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
 199	else
 200		pci_write_config_word(pdev, 0x50, misc | 0x0300);
 201}
 202EXPORT_SYMBOL_GPL(sb800_prefetch);
 203
 204static void usb_amd_find_chipset_info(void)
 205{
 206	unsigned long flags;
 207	struct amd_chipset_info info;
 208	info.need_pll_quirk = 0;
 209
 210	spin_lock_irqsave(&amd_lock, flags);
 211
 212	/* probe only once */
 213	if (amd_chipset.probe_count > 0) {
 214		amd_chipset.probe_count++;
 215		spin_unlock_irqrestore(&amd_lock, flags);
 216		return;
 217	}
 218	memset(&info, 0, sizeof(info));
 219	spin_unlock_irqrestore(&amd_lock, flags);
 220
 221	if (!amd_chipset_sb_type_init(&info)) {
 222		goto commit;
 223	}
 224
 225	switch (info.sb_type.gen) {
 226	case AMD_CHIPSET_SB700:
 227		info.need_pll_quirk = info.sb_type.rev <= 0x3B;
 228		break;
 229	case AMD_CHIPSET_SB800:
 230	case AMD_CHIPSET_HUDSON2:
 231	case AMD_CHIPSET_BOLTON:
 232		info.need_pll_quirk = 1;
 233		break;
 234	default:
 235		info.need_pll_quirk = 0;
 236		break;
 237	}
 238
 239	if (!info.need_pll_quirk) {
 240		if (info.smbus_dev) {
 241			pci_dev_put(info.smbus_dev);
 242			info.smbus_dev = NULL;
 243		}
 244		goto commit;
 245	}
 246
 247	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
 248	if (info.nb_dev) {
 249		info.nb_type = 1;
 250	} else {
 251		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
 252		if (info.nb_dev) {
 253			info.nb_type = 2;
 254		} else {
 255			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 256						     0x9600, NULL);
 257			if (info.nb_dev)
 258				info.nb_type = 3;
 259		}
 260	}
 261
 262	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 263
 264commit:
 265
 266	spin_lock_irqsave(&amd_lock, flags);
 267	if (amd_chipset.probe_count > 0) {
 268		/* race - someone else was faster - drop devices */
 269
  270		/* Mark that we were here */
 271		amd_chipset.probe_count++;
 272
 273		spin_unlock_irqrestore(&amd_lock, flags);
 274
 275		pci_dev_put(info.nb_dev);
 276		pci_dev_put(info.smbus_dev);
 277
 278	} else {
 279		/* no race - commit the result */
 280		info.probe_count++;
 281		amd_chipset = info;
 282		spin_unlock_irqrestore(&amd_lock, flags);
 283	}
 284}
 285
 286int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
 287{
 288	/* Make sure amd chipset type has already been initialized */
 289	usb_amd_find_chipset_info();
 290	if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
 291	    amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
 292		dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
 293		return 1;
 294	}
 295	return 0;
 296}
 297EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
 298
 299bool usb_amd_hang_symptom_quirk(void)
 300{
 301	u8 rev;
 302
 303	usb_amd_find_chipset_info();
 304	rev = amd_chipset.sb_type.rev;
 305	/* SB600 and old version of SB700 have hang symptom bug */
 306	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
 307			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 308			 rev >= 0x3a && rev <= 0x3b);
 309}
 310EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
 311
 312bool usb_amd_prefetch_quirk(void)
 313{
 314	usb_amd_find_chipset_info();
 315	/* SB800 needs pre-fetch fix */
 316	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
 317}
 318EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
 319
 320bool usb_amd_quirk_pll_check(void)
 321{
 322	usb_amd_find_chipset_info();
 323	return amd_chipset.need_pll_quirk;
 324}
 325EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check);
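/*
 * Usage sketch: with the v5.4 need_pll_quirk flag, a host driver only has
 * to ask once at setup time and can cache the answer, e.g.:
 *
 *	if (usb_amd_quirk_pll_check())
 *		ohci->flags |= OHCI_QUIRK_AMD_PLL;	(illustrative caller)
 *
 * and then bracket isochronous streams with usb_amd_quirk_pll_disable()
 * and usb_amd_quirk_pll_enable() only when that flag is set.
 */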
 326
 327/*
 328 * The hardware normally enables the A-link power management feature, which
 329 * lets the system lower the power consumption in idle states.
 330 *
 331 * This USB quirk prevents the link going into that lower power state
 332 * during isochronous transfers.
 333 *
 334 * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of
 335 * some AMD platforms may stutter or have breaks occasionally.
 336 */
 337static void usb_amd_quirk_pll(int disable)
 338{
 339	u32 addr, addr_low, addr_high, val;
 340	u32 bit = disable ? 0 : 1;
 341	unsigned long flags;
 342
 343	spin_lock_irqsave(&amd_lock, flags);
 344
 345	if (disable) {
 346		amd_chipset.isoc_reqs++;
 347		if (amd_chipset.isoc_reqs > 1) {
 348			spin_unlock_irqrestore(&amd_lock, flags);
 349			return;
 350		}
 351	} else {
 352		amd_chipset.isoc_reqs--;
 353		if (amd_chipset.isoc_reqs > 0) {
 354			spin_unlock_irqrestore(&amd_lock, flags);
 355			return;
 356		}
 357	}
 358
 359	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
 360			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
 361			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
 362		outb_p(AB_REG_BAR_LOW, 0xcd6);
 363		addr_low = inb_p(0xcd7);
 364		outb_p(AB_REG_BAR_HIGH, 0xcd6);
 365		addr_high = inb_p(0xcd7);
 366		addr = addr_high << 8 | addr_low;
 367
 368		outl_p(0x30, AB_INDX(addr));
 369		outl_p(0x40, AB_DATA(addr));
 370		outl_p(0x34, AB_INDX(addr));
 371		val = inl_p(AB_DATA(addr));
 372	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 373			amd_chipset.sb_type.rev <= 0x3b) {
 374		pci_read_config_dword(amd_chipset.smbus_dev,
 375					AB_REG_BAR_SB700, &addr);
 376		outl(AX_INDXC, AB_INDX(addr));
 377		outl(0x40, AB_DATA(addr));
 378		outl(AX_DATAC, AB_INDX(addr));
 379		val = inl(AB_DATA(addr));
 380	} else {
 381		spin_unlock_irqrestore(&amd_lock, flags);
 382		return;
 383	}
 384
 385	if (disable) {
 386		val &= ~0x08;
 387		val |= (1 << 4) | (1 << 9);
 388	} else {
 389		val |= 0x08;
 390		val &= ~((1 << 4) | (1 << 9));
 391	}
 392	outl_p(val, AB_DATA(addr));
 393
 394	if (!amd_chipset.nb_dev) {
 395		spin_unlock_irqrestore(&amd_lock, flags);
 396		return;
 397	}
 398
 399	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
 400		addr = PCIE_P_CNTL;
 401		pci_write_config_dword(amd_chipset.nb_dev,
 402					NB_PCIE_INDX_ADDR, addr);
 403		pci_read_config_dword(amd_chipset.nb_dev,
 404					NB_PCIE_INDX_DATA, &val);
 405
 406		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
 407		val |= bit | (bit << 3) | (bit << 12);
 408		val |= ((!bit) << 4) | ((!bit) << 9);
 409		pci_write_config_dword(amd_chipset.nb_dev,
 410					NB_PCIE_INDX_DATA, val);
 411
 412		addr = BIF_NB;
 413		pci_write_config_dword(amd_chipset.nb_dev,
 414					NB_PCIE_INDX_ADDR, addr);
 415		pci_read_config_dword(amd_chipset.nb_dev,
 416					NB_PCIE_INDX_DATA, &val);
 417		val &= ~(1 << 8);
 418		val |= bit << 8;
 419
 420		pci_write_config_dword(amd_chipset.nb_dev,
 421					NB_PCIE_INDX_DATA, val);
 422	} else if (amd_chipset.nb_type == 2) {
 423		addr = NB_PIF0_PWRDOWN_0;
 424		pci_write_config_dword(amd_chipset.nb_dev,
 425					NB_PCIE_INDX_ADDR, addr);
 426		pci_read_config_dword(amd_chipset.nb_dev,
 427					NB_PCIE_INDX_DATA, &val);
 428		if (disable)
 429			val &= ~(0x3f << 7);
 430		else
 431			val |= 0x3f << 7;
 432
 433		pci_write_config_dword(amd_chipset.nb_dev,
 434					NB_PCIE_INDX_DATA, val);
 435
 436		addr = NB_PIF0_PWRDOWN_1;
 437		pci_write_config_dword(amd_chipset.nb_dev,
 438					NB_PCIE_INDX_ADDR, addr);
 439		pci_read_config_dword(amd_chipset.nb_dev,
 440					NB_PCIE_INDX_DATA, &val);
 441		if (disable)
 442			val &= ~(0x3f << 7);
 443		else
 444			val |= 0x3f << 7;
 445
 446		pci_write_config_dword(amd_chipset.nb_dev,
 447					NB_PCIE_INDX_DATA, val);
 448	}
 449
 450	spin_unlock_irqrestore(&amd_lock, flags);
 451	return;
 452}
 453
 454void usb_amd_quirk_pll_disable(void)
 455{
 456	usb_amd_quirk_pll(1);
 457}
 458EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
 459
 460static int usb_asmedia_wait_write(struct pci_dev *pdev)
 461{
 462	unsigned long retry_count;
 463	unsigned char value;
 464
 465	for (retry_count = 1000; retry_count > 0; --retry_count) {
 466
 467		pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
 468
 469		if (value == 0xff) {
 470			dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
 471			return -EIO;
 472		}
 473
 474		if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
 475			return 0;
 476
 477		udelay(50);
 478	}
 479
 480	dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
 481	return -ETIMEDOUT;
 482}
 483
 484void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
 485{
 486	if (usb_asmedia_wait_write(pdev) != 0)
 487		return;
 488
 489	/* send command and address to device */
 490	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
 491	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
 492	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
 493
 494	if (usb_asmedia_wait_write(pdev) != 0)
 495		return;
 496
 497	/* send data to device */
 498	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
 499	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
 500	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
 501}
 502EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
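/*
 * Usage sketch: this helper is intended for ASMedia xHCI controllers that
 * need their internal flow-control setting rewritten; a driver would gate
 * it on a per-device quirk, roughly (surrounding code illustrative):
 *
 *	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
 *		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
 */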
 503
 504void usb_amd_quirk_pll_enable(void)
 505{
 506	usb_amd_quirk_pll(0);
 507}
 508EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
 509
 510void usb_amd_dev_put(void)
 511{
 512	struct pci_dev *nb, *smbus;
 513	unsigned long flags;
 514
 515	spin_lock_irqsave(&amd_lock, flags);
 516
 517	amd_chipset.probe_count--;
 518	if (amd_chipset.probe_count > 0) {
 519		spin_unlock_irqrestore(&amd_lock, flags);
 520		return;
 521	}
 522
 523	/* save them to pci_dev_put outside of spinlock */
 524	nb    = amd_chipset.nb_dev;
 525	smbus = amd_chipset.smbus_dev;
 526
 527	amd_chipset.nb_dev = NULL;
 528	amd_chipset.smbus_dev = NULL;
 529	amd_chipset.nb_type = 0;
 530	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
 531	amd_chipset.isoc_reqs = 0;
 532	amd_chipset.need_pll_quirk = 0;
 533
 534	spin_unlock_irqrestore(&amd_lock, flags);
 535
 536	pci_dev_put(nb);
 537	pci_dev_put(smbus);
 538}
 539EXPORT_SYMBOL_GPL(usb_amd_dev_put);
 540
 541/*
 542 * Check if port is disabled in BIOS on AMD Promontory host.
 543 * BIOS Disabled ports may wake on connect/disconnect and need
 544 * driver workaround to keep them disabled.
 545 * Returns true if port is marked disabled.
 546 */
 547bool usb_amd_pt_check_port(struct device *device, int port)
 548{
 549	unsigned char value, port_shift;
 550	struct pci_dev *pdev;
 551	u16 reg;
 552
 553	pdev = to_pci_dev(device);
 554	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);
 555
 556	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 557	if (value != PT_SIG_1_DATA)
 558		return false;
 559
 560	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);
 561
 562	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 563	if (value != PT_SIG_2_DATA)
 564		return false;
 565
 566	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);
 567
 568	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 569	if (value != PT_SIG_3_DATA)
 570		return false;
 571
 572	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);
 573
 574	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 575	if (value != PT_SIG_4_DATA)
 576		return false;
 577
 578	/* Check disabled port setting, if bit is set port is enabled */
 579	switch (pdev->device) {
 580	case 0x43b9:
 581	case 0x43ba:
 582	/*
 583	 * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
 584	 * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0
 585	 * PT4_P2_REG bits[6..0] represents ports 13 to 7
 586	 */
 587		if (port > 6) {
 588			reg = PT4_P2_REG;
 589			port_shift = port - 7;
 590		} else {
 591			reg = PT4_P1_REG;
 592			port_shift = port + 1;
 593		}
 594		break;
 595	case 0x43bb:
 596	/*
 597	 * device is AMD_PROMONTORYA_2(0x43bb)
 598	 * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0
 599	 * PT2_P2_REG bits[5..0] represents ports 9 to 3
 600	 */
 601		if (port > 2) {
 602			reg = PT2_P2_REG;
 603			port_shift = port - 3;
 604		} else {
 605			reg = PT2_P1_REG;
 606			port_shift = port + 5;
 607		}
 608		break;
 609	case 0x43bc:
 610	/*
 611	 * device is AMD_PROMONTORYA_1(0x43bc)
 612	 * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0
 613	 * PT1_P2_REG[5..0] represents ports 9 to 4
 614	 */
 615		if (port > 3) {
 616			reg = PT1_P2_REG;
 617			port_shift = port - 4;
 618		} else {
 619			reg = PT1_P1_REG;
 620			port_shift = port + 4;
 621		}
 622		break;
 623	default:
 624		return false;
 625	}
 626	pci_write_config_word(pdev, PT_ADDR_INDX, reg);
 627	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 628
 629	return !(value & BIT(port_shift));
 630}
 631EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
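/*
 * Usage sketch: a root-hub port-power path can consult this so that ports
 * the BIOS disabled stay off; the test reads roughly as (surrounding code
 * illustrative):
 *
 *	if (usb_amd_pt_check_port(hcd->self.controller, port))
 *		return;		port is BIOS-disabled, leave it unpowered
 */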
 632
 633/*
 634 * Make sure the controller is completely inactive, unable to
 635 * generate interrupts or do DMA.
 636 */
 637void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
 638{
 639	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
 640	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
 641	 */
 642	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
 643
 644	/* Reset the HC - this will force us to get a
 645	 * new notification of any already connected
 646	 * ports due to the virtual disconnect that it
 647	 * implies.
 648	 */
 649	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
 650	mb();
 651	udelay(5);
 652	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
 653		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
 654
 655	/* Just to be safe, disable interrupt requests and
 656	 * make sure the controller is stopped.
 657	 */
 658	outw(0, base + UHCI_USBINTR);
 659	outw(0, base + UHCI_USBCMD);
 660}
 661EXPORT_SYMBOL_GPL(uhci_reset_hc);
 662
 663/*
 664 * Initialize a controller that was newly discovered or has just been
 665 * resumed.  In either case we can't be sure of its previous state.
 666 *
 667 * Returns: 1 if the controller was reset, 0 otherwise.
 668 */
 669int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
 670{
 671	u16 legsup;
 672	unsigned int cmd, intr;
 673
 674	/*
 675	 * When restarting a suspended controller, we expect all the
 676	 * settings to be the same as we left them:
 677	 *
 678	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
 679	 *	Controller is stopped and configured with EGSM set;
 680	 *	No interrupts enabled except possibly Resume Detect.
 681	 *
 682	 * If any of these conditions are violated we do a complete reset.
 683	 */
 684	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
 685	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
 686		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
 687				__func__, legsup);
 688		goto reset_needed;
 689	}
 690
 691	cmd = inw(base + UHCI_USBCMD);
 692	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
 693			!(cmd & UHCI_USBCMD_EGSM)) {
 694		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
 695				__func__, cmd);
 696		goto reset_needed;
 697	}
 698
 699	intr = inw(base + UHCI_USBINTR);
 700	if (intr & (~UHCI_USBINTR_RESUME)) {
 701		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
 702				__func__, intr);
 703		goto reset_needed;
 704	}
 705	return 0;
 706
 707reset_needed:
 708	dev_dbg(&pdev->dev, "Performing full reset\n");
 709	uhci_reset_hc(pdev, base);
 710	return 1;
 711}
 712EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
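/*
 * Illustrative sketch only (not built): a resume path could use the return
 * value of uhci_check_and_reset_hc() to decide whether the state programmed
 * before suspend survived.  example_uhci_resume() and example_uhci_reinit()
 * are hypothetical stand-ins, not the real uhci-hcd code paths.
 */
#if 0
static void example_uhci_resume(struct pci_dev *pdev, unsigned long io_base)
{
	if (uhci_check_and_reset_hc(pdev, io_base)) {
		/* 1: a full reset was performed; reprogram the controller */
		example_uhci_reinit(pdev, io_base);	/* hypothetical helper */
	} else {
		/* 0: the suspend-time settings were found intact */
	}
}
#endif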
 713
 714static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
 715{
 716	u16 cmd;
 717	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
 718}
 719
 720#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
 721#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
 722
 723static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
 724{
 725	unsigned long base = 0;
 726	int i;
 727
 728	if (!pio_enabled(pdev))
 729		return;
 730
 731	for (i = 0; i < PCI_ROM_RESOURCE; i++)
 732		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
 733			base = pci_resource_start(pdev, i);
 734			break;
 735		}
 736
 737	if (base)
 738		uhci_check_and_reset_hc(pdev, base);
 739}
 740
 741static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
 742{
 743	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
 744}
 745
 746static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
 747{
 748	void __iomem *base;
 749	u32 control;
 750	u32 fminterval = 0;
 751	bool no_fminterval = false;
 752	int cnt;
 753
 754	if (!mmio_resource_enabled(pdev, 0))
 755		return;
 756
 757	base = pci_ioremap_bar(pdev, 0);
 758	if (base == NULL)
 759		return;
 760
 761	/*
 762	 * ULi M5237 OHCI controller locks the whole system when accessing
 763	 * the OHCI_FMINTERVAL offset.
 764	 */
 765	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
 766		no_fminterval = true;
 767
 768	control = readl(base + OHCI_CONTROL);
 769
 770/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
 771#ifdef __hppa__
 772#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
 773#else
 774#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC
 775
 776	if (control & OHCI_CTRL_IR) {
  777		int wait_time = 500; /* arbitrary; ~0.5 seconds */

 778		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
 779		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
 780		while (wait_time > 0 &&
 781				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
 782			wait_time -= 10;
 783			msleep(10);
 784		}
 785		if (wait_time <= 0)
 786			dev_warn(&pdev->dev,
 787				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 788				 readl(base + OHCI_CONTROL));
 789	}
 790#endif
 791
 792	/* disable interrupts */
 793	writel((u32) ~0, base + OHCI_INTRDISABLE);
 794
 795	/* Go into the USB_RESET state, preserving RWC (and possibly IR) */
 796	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
 797	readl(base + OHCI_CONTROL);
 798
 799	/* software reset of the controller, preserving HcFmInterval */
 800	if (!no_fminterval)
 801		fminterval = readl(base + OHCI_FMINTERVAL);
 802
 803	writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 804
 805	/* reset requires max 10 us delay */
 806	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
 807		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
 808			break;
 809		udelay(1);
 810	}
 811
 812	if (!no_fminterval)
 813		writel(fminterval, base + OHCI_FMINTERVAL);
 814
 815	/* Now the controller is safely in SUSPEND and nothing can wake it up */
 816	iounmap(base);
 817}
 818
 819static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
 820	{
 821		/*  Pegatron Lucid (ExoPC) */
 822		.matches = {
 823			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
 824			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
 825		},
 826	},
 827	{
 828		/*  Pegatron Lucid (Ordissimo AIRIS) */
 829		.matches = {
 830			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
 831			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 832		},
 833	},
 834	{
 835		/*  Pegatron Lucid (Ordissimo) */
 836		.matches = {
 837			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
 838			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 839		},
 840	},
 841	{
 842		/* HASEE E200 */
 843		.matches = {
 844			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
 845			DMI_MATCH(DMI_BOARD_NAME, "E210"),
 846			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
 847		},
 848	},
 849	{ }
 850};
 851
 852static void ehci_bios_handoff(struct pci_dev *pdev,
 853					void __iomem *op_reg_base,
 854					u32 cap, u8 offset)
 855{
 856	int try_handoff = 1, tried_handoff = 0;
 857
 858	/*
 859	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
 860	 * the handoff on its unused controller.  Skip it.
 861	 *
 862	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
 863	 */
 864	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
 865			pdev->device == 0x27cc)) {
 866		if (dmi_check_system(ehci_dmi_nohandoff_table))
 867			try_handoff = 0;
 868	}
 869
 870	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
 871		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
 872
 873#if 0
 874/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 875 * but that seems dubious in general (the BIOS left it off intentionally)
 876 * and is known to prevent some systems from booting.  so we won't do this
 877 * unless maybe we can determine when we're on a system that needs SMI forced.
 878 */
 879		/* BIOS workaround (?): be sure the pre-Linux code
 880		 * receives the SMI
 881		 */
 882		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
 883		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
 884				       val | EHCI_USBLEGCTLSTS_SOOE);
 885#endif
 886
 887		/* some systems get upset if this semaphore is
 888		 * set for any other reason than forcing a BIOS
 889		 * handoff..
 890		 */
 891		pci_write_config_byte(pdev, offset + 3, 1);
 892	}
 893
 894	/* if boot firmware now owns EHCI, spin till it hands it over. */
 895	if (try_handoff) {
 896		int msec = 1000;
 897		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
 898			tried_handoff = 1;
 899			msleep(10);
 900			msec -= 10;
 901			pci_read_config_dword(pdev, offset, &cap);
 902		}
 903	}
 904
 905	if (cap & EHCI_USBLEGSUP_BIOS) {
 906		/* well, possibly buggy BIOS... try to shut it down,
 907		 * and hope nothing goes too wrong
 908		 */
 909		if (try_handoff)
 910			dev_warn(&pdev->dev,
 911				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 912				 cap);
 913		pci_write_config_byte(pdev, offset + 2, 0);
 914	}
 915
 916	/* just in case, always disable EHCI SMIs */
 917	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
 918
 919	/* If the BIOS ever owned the controller then we can't expect
 920	 * any power sessions to remain intact.
 921	 */
 922	if (tried_handoff)
 923		writel(0, op_reg_base + EHCI_CONFIGFLAG);
 924}
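/*
 * Illustrative sketch only (not built): reading back both legacy-support
 * dwords that the handoff above manipulates, to inspect the BIOS and OS
 * semaphores.  "offset" is the config-space offset of the capability,
 * exactly as passed to ehci_bios_handoff(); the helper name is hypothetical.
 */
#if 0
static void example_dump_ehci_legsup(struct pci_dev *pdev, u8 offset)
{
	u32 legsup, ctlsts;

	pci_read_config_dword(pdev, offset + EHCI_USBLEGSUP, &legsup);
	pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &ctlsts);

	dev_dbg(&pdev->dev, "EHCI legsup %08x (BIOS %d, OS %d) ctlsts %08x\n",
		legsup, !!(legsup & EHCI_USBLEGSUP_BIOS),
		!!(legsup & EHCI_USBLEGSUP_OS), ctlsts);
}
#endif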
 925
 926static void quirk_usb_disable_ehci(struct pci_dev *pdev)
 927{
 928	void __iomem *base, *op_reg_base;
 929	u32	hcc_params, cap, val;
 930	u8	offset, cap_length;
 931	int	wait_time, count = 256/4;
 932
 933	if (!mmio_resource_enabled(pdev, 0))
 934		return;
 935
 936	base = pci_ioremap_bar(pdev, 0);
 937	if (base == NULL)
 938		return;
 939
 940	cap_length = readb(base);
 941	op_reg_base = base + cap_length;
 942
 943	/* EHCI 0.96 and later may have "extended capabilities"
 944	 * spec section 5.1 explains the bios handoff, e.g. for
 945	 * booting from USB disk or using a usb keyboard
 946	 */
 947	hcc_params = readl(base + EHCI_HCC_PARAMS);
 948	offset = (hcc_params >> 8) & 0xff;
 949	while (offset && --count) {
 950		pci_read_config_dword(pdev, offset, &cap);
 951
 952		switch (cap & 0xff) {
 953		case 1:
 954			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
 955			break;
 956		case 0: /* Illegal reserved cap, set cap=0 so we exit */
 957			cap = 0; /* fall through */
 958		default:
 959			dev_warn(&pdev->dev,
 960				 "EHCI: unrecognized capability %02x\n",
 961				 cap & 0xff);
 962		}
 963		offset = (cap >> 8) & 0xff;
 964	}
 965	if (!count)
 966		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
 967
 968	/*
 969	 * halt EHCI & disable its interrupts in any case
 970	 */
 971	val = readl(op_reg_base + EHCI_USBSTS);
 972	if ((val & EHCI_USBSTS_HALTED) == 0) {
 973		val = readl(op_reg_base + EHCI_USBCMD);
 974		val &= ~EHCI_USBCMD_RUN;
 975		writel(val, op_reg_base + EHCI_USBCMD);
 976
 977		wait_time = 2000;
 978		do {
 979			writel(0x3f, op_reg_base + EHCI_USBSTS);
 980			udelay(100);
 981			wait_time -= 100;
 982			val = readl(op_reg_base + EHCI_USBSTS);
 983			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
 984				break;
 985			}
 986		} while (wait_time > 0);
 987	}
 988	writel(0, op_reg_base + EHCI_USBINTR);
 989	writel(0x3f, op_reg_base + EHCI_USBSTS);
 990
 991	iounmap(base);
 992}
 993
 994/*
 995 * handshake - spin reading a register until handshake completes
 996 * @ptr: address of hc register to be read
 997 * @mask: bits to look at in result of read
 998 * @done: value of those bits when handshake succeeds
 999 * @wait_usec: timeout in microseconds
1000 * @delay_usec: delay in microseconds to wait between polling
1001 *
1002 * Polls a register every delay_usec microseconds.
1003 * Returns 0 when the mask bits have the value done.
1004 * Returns -ETIMEDOUT if this condition is not true after
1005 * wait_usec microseconds have passed.
1006 */
1007static int handshake(void __iomem *ptr, u32 mask, u32 done,
1008		int wait_usec, int delay_usec)
1009{
1010	u32	result;
1011
1012	do {
1013		result = readl(ptr);
1014		result &= mask;
1015		if (result == done)
1016			return 0;
1017		udelay(delay_usec);
1018		wait_usec -= delay_usec;
1019	} while (wait_usec > 0);
1020	return -ETIMEDOUT;
1021}
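/*
 * Worked example (for illustration only): the xHCI handoff below calls
 * handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, 5000000, 10),
 * i.e. it re-reads the status register every 10 usec and gives up after
 * 5000000 usec, so at most 500,000 polls (about 5 seconds) before the
 * -ETIMEDOUT return is taken.
 */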
1022
1023/*
1024 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
1025 * share some number of ports.  These ports can be switched between either
1026 * controller.  Not all of the ports under the EHCI host controller may be
1027 * switchable.
1028 *
1029 * The ports should be switched over to xHCI before PCI probes for any device
1030 * start.  This avoids active devices under EHCI being disconnected during the
1031 * port switchover, which could cause loss of data on USB storage devices, or
1032 * failed boot when the root file system is on a USB mass storage device and is
1033 * enumerated under EHCI first.
1034 *
1035 * We write into the xHC's PCI configuration space in some Intel-specific
1036 * registers to switch the ports over.  The USB 3.0 terminations and the USB
1037 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
1038 * terminations before switching the USB 2.0 wires over, so that USB 3.0
1039 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
1040 */
1041void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
1042{
1043	u32		ports_available;
1044	bool		ehci_found = false;
1045	struct pci_dev	*companion = NULL;
1046
1047	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
1048	 * switching ports from EHCI to xHCI
1049	 */
1050	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
1051	    xhci_pdev->subsystem_device == 0x90a8)
1052		return;
1053
1054	/* make sure an intel EHCI controller exists */
1055	for_each_pci_dev(companion) {
1056		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
1057		    companion->vendor == PCI_VENDOR_ID_INTEL) {
1058			ehci_found = true;
1059			break;
1060		}
1061	}
1062
1063	if (!ehci_found)
1064		return;
1065
1066	/* Don't switchover the ports if the user hasn't compiled the xHCI
1067	 * driver.  Otherwise they will see "dead" USB ports that don't power
1068	 * the devices.
1069	 */
1070	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
1071		dev_warn(&xhci_pdev->dev,
1072			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
1073		dev_warn(&xhci_pdev->dev,
1074				"USB 3.0 devices will work at USB 2.0 speeds.\n");
1075		usb_disable_xhci_ports(xhci_pdev);
1076		return;
1077	}
1078
1079	/* Read USB3PRM, the USB 3.0 Port Routing Mask Register
1080	 * Indicate the ports that can be changed from OS.
1081	 */
1082	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
1083			&ports_available);
1084
1085	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
1086			ports_available);
1087
1088	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
1089	 * Register, to turn on SuperSpeed terminations for the
1090	 * switchable ports.
1091	 */
1092	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
1093			ports_available);
1094
1095	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
1096			&ports_available);
1097	dev_dbg(&xhci_pdev->dev,
1098		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
1099		ports_available);
1100
1101	/* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register
1102	 * Indicate the USB 2.0 ports to be controlled by the xHCI host.
1103	 */
1104
1105	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
1106			&ports_available);
1107
 1108	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
1109			ports_available);
1110
1111	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
1112	 * switch the USB 2.0 power and data lines over to the xHCI
1113	 * host.
1114	 */
1115	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
1116			ports_available);
1117
1118	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
1119			&ports_available);
1120	dev_dbg(&xhci_pdev->dev,
1121		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
1122		ports_available);
1123}
1124EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
1125
1126void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
1127{
1128	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
1129	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
1130}
1131EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
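/*
 * Illustrative sketch only (not built): hypothetical hooks in an Intel xHCI
 * PCI driver that route the shared ports to xHCI while the driver is bound,
 * and hand them back (leaving USB 3.0 devices at USB 2.0 speeds) when it is
 * torn down.  The example_* names are placeholders, not the real xhci-pci
 * callbacks.
 */
#if 0
static void example_xhci_take_ports(struct pci_dev *xhci_pdev)
{
	if (xhci_pdev->vendor == PCI_VENDOR_ID_INTEL)
		usb_enable_intel_xhci_ports(xhci_pdev);
}

static void example_xhci_release_ports(struct pci_dev *xhci_pdev)
{
	if (xhci_pdev->vendor == PCI_VENDOR_ID_INTEL)
		usb_disable_xhci_ports(xhci_pdev);
}
#endif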
1132
 1133/*
 1134 * PCI Quirks for xHCI.
 1135 *
 1136 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
 1137 * It signals to the BIOS that the OS wants control of the host controller,
 1138 * and then waits 1 second for the BIOS to hand over control.
 1139 * If we time out, assume the BIOS is broken and take control anyway.
 1140 */
1141static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
1142{
1143	void __iomem *base;
1144	int ext_cap_offset;
1145	void __iomem *op_reg_base;
1146	u32 val;
1147	int timeout;
1148	int len = pci_resource_len(pdev, 0);
1149
1150	if (!mmio_resource_enabled(pdev, 0))
1151		return;
1152
1153	base = ioremap_nocache(pci_resource_start(pdev, 0), len);
1154	if (base == NULL)
1155		return;
1156
1157	/*
1158	 * Find the Legacy Support Capability register -
1159	 * this is optional for xHCI host controllers.
1160	 */
1161	ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);
1162
1163	if (!ext_cap_offset)
1164		goto hc_init;
1165
1166	if ((ext_cap_offset + sizeof(val)) > len) {
1167		/* We're reading garbage from the controller */
 1168		dev_warn(&pdev->dev, "xHCI controller failing to respond\n");
1169		goto iounmap;
1170	}
1171	val = readl(base + ext_cap_offset);
1172
1173	/* Auto handoff never worked for these devices. Force it and continue */
1174	if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
1175			(pdev->vendor == PCI_VENDOR_ID_RENESAS
1176			 && pdev->device == 0x0014)) {
1177		val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
1178		writel(val, base + ext_cap_offset);
1179	}
1180
1181	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
1182	if (val & XHCI_HC_BIOS_OWNED) {
1183		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
1184
1185		/* Wait for 1 second with 10 microsecond polling interval */
1186		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
1187				0, 1000000, 10);
1188
1189		/* Assume a buggy BIOS and take HC ownership anyway */
1190		if (timeout) {
1191			dev_warn(&pdev->dev,
1192				 "xHCI BIOS handoff failed (BIOS bug ?) %08x\n",
1193				 val);
1194			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
1195		}
1196	}
1197
1198	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1199	/* Mask off (turn off) any enabled SMIs */
1200	val &= XHCI_LEGACY_DISABLE_SMI;
1201	/* Mask all SMI events bits, RW1C */
1202	val |= XHCI_LEGACY_SMI_EVENTS;
 1203	/* Disable any BIOS SMIs and clear all SMI events */
1204	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1205
1206hc_init:
1207	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
1208		usb_enable_intel_xhci_ports(pdev);
1209
1210	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
1211
1212	/* Wait for the host controller to be ready before writing any
1213	 * operational or runtime registers.  Wait 5 seconds and no more.
1214	 */
1215	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
1216			5000000, 10);
1217	/* Assume a buggy HC and start HC initialization anyway */
1218	if (timeout) {
1219		val = readl(op_reg_base + XHCI_STS_OFFSET);
1220		dev_warn(&pdev->dev,
1221			 "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
1222			 val);
1223	}
1224
1225	/* Send the halt and disable interrupts command */
1226	val = readl(op_reg_base + XHCI_CMD_OFFSET);
1227	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
1228	writel(val, op_reg_base + XHCI_CMD_OFFSET);
1229
1230	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
1231	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
1232			XHCI_MAX_HALT_USEC, 125);
1233	if (timeout) {
1234		val = readl(op_reg_base + XHCI_STS_OFFSET);
1235		dev_warn(&pdev->dev,
1236			 "xHCI HW did not halt within %d usec status = 0x%x\n",
1237			 XHCI_MAX_HALT_USEC, val);
1238	}
1239
1240iounmap:
1241	iounmap(base);
1242}
1243
1244static void quirk_usb_early_handoff(struct pci_dev *pdev)
1245{
1246	/* Skip Netlogic mips SoC's internal PCI USB controller.
1247	 * This device does not need/support EHCI/OHCI handoff
1248	 */
1249	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
1250		return;
1251	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
1252			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
1253			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
1254			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
1255		return;
1256
1257	if (pci_enable_device(pdev) < 0) {
1258		dev_warn(&pdev->dev,
1259			 "Can't enable PCI device, BIOS handoff failed.\n");
1260		return;
1261	}
1262	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
1263		quirk_usb_handoff_uhci(pdev);
1264	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
1265		quirk_usb_handoff_ohci(pdev);
1266	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
1267		quirk_usb_disable_ehci(pdev);
1268	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
1269		quirk_usb_handoff_xhci(pdev);
1270	pci_disable_device(pdev);
1271}
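/*
 * Register the handoff as a FINAL fixup for every controller whose class,
 * shifted right by 8 bits (dropping the programming-interface byte), equals
 * PCI_CLASS_SERIAL_USB; quirk_usb_early_handoff() then dispatches on the
 * full class value to pick the UHCI, OHCI, EHCI or xHCI path.
 */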
1272DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1273			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);