drivers/usb/host/pci-quirks.c (Linux v4.6)
 
   1/*
   2 * This file contains code to reset and initialize USB host controllers.
   3 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
   4 * It may need to run early during booting -- before USB would normally
   5 * initialize -- to ensure that Linux doesn't use any legacy modes.
   6 *
   7 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
   8 *  (and others)
   9 */
  10
  11#include <linux/types.h>
  12#include <linux/kconfig.h>
  13#include <linux/kernel.h>
  14#include <linux/pci.h>
  15#include <linux/delay.h>
  16#include <linux/export.h>
  17#include <linux/acpi.h>
  18#include <linux/dmi.h>
  19#include "pci-quirks.h"
  20#include "xhci-ext-caps.h"
  21
  22
  23#define UHCI_USBLEGSUP		0xc0		/* legacy support */
  24#define UHCI_USBCMD		0		/* command register */
  25#define UHCI_USBINTR		4		/* interrupt register */
  26#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
  27#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
  28#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
  29#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
  30#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
  31#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
  32#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */
  33
  34#define OHCI_CONTROL		0x04
  35#define OHCI_CMDSTATUS		0x08
  36#define OHCI_INTRSTATUS		0x0c
  37#define OHCI_INTRENABLE		0x10
  38#define OHCI_INTRDISABLE	0x14
  39#define OHCI_FMINTERVAL		0x34
  40#define OHCI_HCFS		(3 << 6)	/* hc functional state */
  41#define OHCI_HCR		(1 << 0)	/* host controller reset */
  42#define OHCI_OCR		(1 << 3)	/* ownership change request */
  43#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
  44#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
  45#define OHCI_INTR_OC		(1 << 30)	/* ownership change */
  46
  47#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
  48#define EHCI_USBCMD		0		/* command register */
  49#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
  50#define EHCI_USBSTS		4		/* status register */
  51#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
  52#define EHCI_USBINTR		8		/* interrupt register */
  53#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
  54#define EHCI_USBLEGSUP		0		/* legacy support register */
  55#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
  56#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
  57#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
  58#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */
  59
  60/* AMD quirk use */
  61#define	AB_REG_BAR_LOW		0xe0
  62#define	AB_REG_BAR_HIGH		0xe1
  63#define	AB_REG_BAR_SB700	0xf0
  64#define	AB_INDX(addr)		((addr) + 0x00)
  65#define	AB_DATA(addr)		((addr) + 0x04)
  66#define	AX_INDXC		0x30
  67#define	AX_DATAC		0x34
  68
  69#define	NB_PCIE_INDX_ADDR	0xe0
  70#define	NB_PCIE_INDX_DATA	0xe4
  71#define	PCIE_P_CNTL		0x10040
  72#define	BIF_NB			0x10002
  73#define	NB_PIF0_PWRDOWN_0	0x01100012
  74#define	NB_PIF0_PWRDOWN_1	0x01100013
  75
  76#define USB_INTEL_XUSB2PR      0xD0
  77#define USB_INTEL_USB2PRM      0xD4
  78#define USB_INTEL_USB3_PSSEN   0xD8
  79#define USB_INTEL_USB3PRM      0xDC
  80
  81/*
  82 * amd_chipset_gen values represent the different AMD chipset generations
  83 */
  84enum amd_chipset_gen {
  85	NOT_AMD_CHIPSET = 0,
  86	AMD_CHIPSET_SB600,
  87	AMD_CHIPSET_SB700,
  88	AMD_CHIPSET_SB800,
  89	AMD_CHIPSET_HUDSON2,
  90	AMD_CHIPSET_BOLTON,
  91	AMD_CHIPSET_YANGTZE,
  92	AMD_CHIPSET_UNKNOWN,
  93};
  94
  95struct amd_chipset_type {
  96	enum amd_chipset_gen gen;
  97	u8 rev;
  98};
  99
 100static struct amd_chipset_info {
 101	struct pci_dev	*nb_dev;
 102	struct pci_dev	*smbus_dev;
 103	int nb_type;
 104	struct amd_chipset_type sb_type;
 105	int isoc_reqs;
 106	int probe_count;
 107	int probe_result;
 108} amd_chipset;
 109
 110static DEFINE_SPINLOCK(amd_lock);
 111
 112/*
 113 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
 114 *
 115 * AMD FCH/SB generation and revision is identified by SMBus controller
 116 * vendor, device and revision IDs.
 117 *
 118 * Returns: 1 if it is an AMD chipset, 0 otherwise.
 119 */
 120static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
 121{
 122	u8 rev = 0;
 123	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
 124
 125	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
 126			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
 127	if (pinfo->smbus_dev) {
 128		rev = pinfo->smbus_dev->revision;
 129		if (rev >= 0x10 && rev <= 0x1f)
 130			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
 131		else if (rev >= 0x30 && rev <= 0x3f)
 132			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
 133		else if (rev >= 0x40 && rev <= 0x4f)
 134			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
 135	} else {
 136		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 137				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
 138
 139		if (!pinfo->smbus_dev) {
 140			pinfo->sb_type.gen = NOT_AMD_CHIPSET;
 141			return 0;
 142		}
 143
 144		rev = pinfo->smbus_dev->revision;
 145		if (rev >= 0x11 && rev <= 0x14)
 146			pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
 147		else if (rev >= 0x15 && rev <= 0x18)
 148			pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
 149		else if (rev >= 0x39 && rev <= 0x3a)
 150			pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
 151	}
 152
 153	pinfo->sb_type.rev = rev;
 154	return 1;
 155}
 156
 157void sb800_prefetch(struct device *dev, int on)
 158{
 159	u16 misc;
 160	struct pci_dev *pdev = to_pci_dev(dev);
 161
 162	pci_read_config_word(pdev, 0x50, &misc);
 163	if (on == 0)
 164		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
 165	else
 166		pci_write_config_word(pdev, 0x50, misc | 0x0300);
 167}
 168EXPORT_SYMBOL_GPL(sb800_prefetch);
 169
 170int usb_amd_find_chipset_info(void)
 171{
 172	unsigned long flags;
 173	struct amd_chipset_info info;
 174	int ret;
 175
 176	spin_lock_irqsave(&amd_lock, flags);
 177
 178	/* probe only once */
 179	if (amd_chipset.probe_count > 0) {
 180		amd_chipset.probe_count++;
 181		spin_unlock_irqrestore(&amd_lock, flags);
 182		return amd_chipset.probe_result;
 183	}
 184	memset(&info, 0, sizeof(info));
 185	spin_unlock_irqrestore(&amd_lock, flags);
 186
 187	if (!amd_chipset_sb_type_init(&info)) {
 188		ret = 0;
 189		goto commit;
 190	}
 191
 192	/* The chipset generations below do not need the AMD PLL quirk */
 193	if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
 194			info.sb_type.gen == AMD_CHIPSET_SB600 ||
 195			info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
 196			(info.sb_type.gen == AMD_CHIPSET_SB700 &&
 197			info.sb_type.rev > 0x3b)) {
 198		if (info.smbus_dev) {
 199			pci_dev_put(info.smbus_dev);
 200			info.smbus_dev = NULL;
 201		}
 202		ret = 0;
 203		goto commit;
 204	}
 205
 206	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
 207	if (info.nb_dev) {
 208		info.nb_type = 1;
 209	} else {
 210		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
 211		if (info.nb_dev) {
 212			info.nb_type = 2;
 213		} else {
 214			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 215						     0x9600, NULL);
 216			if (info.nb_dev)
 217				info.nb_type = 3;
 218		}
 219	}
 220
 221	ret = info.probe_result = 1;
 222	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 223
 224commit:
 225
 226	spin_lock_irqsave(&amd_lock, flags);
 227	if (amd_chipset.probe_count > 0) {
 228		/* race - someone else was faster - drop devices */
 229
 230		/* Mark that we were here */
 231		amd_chipset.probe_count++;
 232		ret = amd_chipset.probe_result;
 233
 234		spin_unlock_irqrestore(&amd_lock, flags);
 235
 236		pci_dev_put(info.nb_dev);
 237		pci_dev_put(info.smbus_dev);
 238
 239	} else {
 240		/* no race - commit the result */
 241		info.probe_count++;
 242		amd_chipset = info;
 243		spin_unlock_irqrestore(&amd_lock, flags);
 244	}
 245
 246	return ret;
 247}
 248EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
 249
 250int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
 251{
 252	/* Make sure amd chipset type has already been initialized */
 253	usb_amd_find_chipset_info();
 254	if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
 255		return 0;
 256
 257	dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
 258	return 1;
 259}
 260EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
 261
 262bool usb_amd_hang_symptom_quirk(void)
 263{
 264	u8 rev;
 265
 266	usb_amd_find_chipset_info();
 267	rev = amd_chipset.sb_type.rev;
 268	/* SB600 and old version of SB700 have hang symptom bug */
 269	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
 270			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 271			 rev >= 0x3a && rev <= 0x3b);
 272}
 273EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
 274
 275bool usb_amd_prefetch_quirk(void)
 276{
 277	usb_amd_find_chipset_info();
 278	/* SB800 needs pre-fetch fix */
 279	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
 280}
 281EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
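The two exports above are meant to be used together from a host-controller driver: usb_amd_prefetch_quirk() answers whether the platform is an SB800, and sb800_prefetch() toggles the prefetch bits in config space. A minimal caller-side sketch follows; the my_ohci_* hooks and the cached flag are invented for illustration, only the two exported calls are real:

/* hedged sketch; only sb800_prefetch() and usb_amd_prefetch_quirk() come from this file */
static bool amd_prefetch;			/* cached quirk decision */

static void my_ohci_probe_quirks(void)
{
	amd_prefetch = usb_amd_prefetch_quirk();
}

static void my_ohci_update_td(struct device *dev)
{
	if (amd_prefetch)
		sb800_prefetch(dev, 0);		/* prefetch off while descriptors change */
	/* ... build or patch the transfer descriptor ... */
	if (amd_prefetch)
		sb800_prefetch(dev, 1);		/* prefetch back on */
}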
 282
 283/*
 284 * The hardware normally enables the A-link power management feature, which
 285 * lets the system lower the power consumption in idle states.
 286 *
 287 * This USB quirk prevents the link going into that lower power state
 288 * during isochronous transfers.
 289 *
 290 * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of
 291 * some AMD platforms may stutter or have breaks occasionally.
 292 */
 293static void usb_amd_quirk_pll(int disable)
 294{
 295	u32 addr, addr_low, addr_high, val;
 296	u32 bit = disable ? 0 : 1;
 297	unsigned long flags;
 298
 299	spin_lock_irqsave(&amd_lock, flags);
 300
 301	if (disable) {
 302		amd_chipset.isoc_reqs++;
 303		if (amd_chipset.isoc_reqs > 1) {
 304			spin_unlock_irqrestore(&amd_lock, flags);
 305			return;
 306		}
 307	} else {
 308		amd_chipset.isoc_reqs--;
 309		if (amd_chipset.isoc_reqs > 0) {
 310			spin_unlock_irqrestore(&amd_lock, flags);
 311			return;
 312		}
 313	}
 314
 315	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
 316			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
 317			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
 318		outb_p(AB_REG_BAR_LOW, 0xcd6);
 319		addr_low = inb_p(0xcd7);
 320		outb_p(AB_REG_BAR_HIGH, 0xcd6);
 321		addr_high = inb_p(0xcd7);
 322		addr = addr_high << 8 | addr_low;
 323
 324		outl_p(0x30, AB_INDX(addr));
 325		outl_p(0x40, AB_DATA(addr));
 326		outl_p(0x34, AB_INDX(addr));
 327		val = inl_p(AB_DATA(addr));
 328	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 329			amd_chipset.sb_type.rev <= 0x3b) {
 330		pci_read_config_dword(amd_chipset.smbus_dev,
 331					AB_REG_BAR_SB700, &addr);
 332		outl(AX_INDXC, AB_INDX(addr));
 333		outl(0x40, AB_DATA(addr));
 334		outl(AX_DATAC, AB_INDX(addr));
 335		val = inl(AB_DATA(addr));
 336	} else {
 337		spin_unlock_irqrestore(&amd_lock, flags);
 338		return;
 339	}
 340
 341	if (disable) {
 342		val &= ~0x08;
 343		val |= (1 << 4) | (1 << 9);
 344	} else {
 345		val |= 0x08;
 346		val &= ~((1 << 4) | (1 << 9));
 347	}
 348	outl_p(val, AB_DATA(addr));
 349
 350	if (!amd_chipset.nb_dev) {
 351		spin_unlock_irqrestore(&amd_lock, flags);
 352		return;
 353	}
 354
 355	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
 356		addr = PCIE_P_CNTL;
 357		pci_write_config_dword(amd_chipset.nb_dev,
 358					NB_PCIE_INDX_ADDR, addr);
 359		pci_read_config_dword(amd_chipset.nb_dev,
 360					NB_PCIE_INDX_DATA, &val);
 361
 362		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
 363		val |= bit | (bit << 3) | (bit << 12);
 364		val |= ((!bit) << 4) | ((!bit) << 9);
 365		pci_write_config_dword(amd_chipset.nb_dev,
 366					NB_PCIE_INDX_DATA, val);
 367
 368		addr = BIF_NB;
 369		pci_write_config_dword(amd_chipset.nb_dev,
 370					NB_PCIE_INDX_ADDR, addr);
 371		pci_read_config_dword(amd_chipset.nb_dev,
 372					NB_PCIE_INDX_DATA, &val);
 373		val &= ~(1 << 8);
 374		val |= bit << 8;
 375
 376		pci_write_config_dword(amd_chipset.nb_dev,
 377					NB_PCIE_INDX_DATA, val);
 378	} else if (amd_chipset.nb_type == 2) {
 379		addr = NB_PIF0_PWRDOWN_0;
 380		pci_write_config_dword(amd_chipset.nb_dev,
 381					NB_PCIE_INDX_ADDR, addr);
 382		pci_read_config_dword(amd_chipset.nb_dev,
 383					NB_PCIE_INDX_DATA, &val);
 384		if (disable)
 385			val &= ~(0x3f << 7);
 386		else
 387			val |= 0x3f << 7;
 388
 389		pci_write_config_dword(amd_chipset.nb_dev,
 390					NB_PCIE_INDX_DATA, val);
 391
 392		addr = NB_PIF0_PWRDOWN_1;
 393		pci_write_config_dword(amd_chipset.nb_dev,
 394					NB_PCIE_INDX_ADDR, addr);
 395		pci_read_config_dword(amd_chipset.nb_dev,
 396					NB_PCIE_INDX_DATA, &val);
 397		if (disable)
 398			val &= ~(0x3f << 7);
 399		else
 400			val |= 0x3f << 7;
 401
 402		pci_write_config_dword(amd_chipset.nb_dev,
 403					NB_PCIE_INDX_DATA, val);
 404	}
 405
 406	spin_unlock_irqrestore(&amd_lock, flags);
 407	return;
 408}
 409
 410void usb_amd_quirk_pll_disable(void)
 411{
 412	usb_amd_quirk_pll(1);
 413}
 414EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
 415
 416void usb_amd_quirk_pll_enable(void)
 417{
 418	usb_amd_quirk_pll(0);
 419}
 420EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
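The disable/enable pair above is reference counted through amd_chipset.isoc_reqs, so each isochronous stream can call both without disturbing other streams. A hedged caller-side sketch; the stream start/stop hooks are invented for illustration, only the exported calls are real:

static void my_hcd_iso_stream_start(void)
{
	if (usb_amd_find_chipset_info())	/* returns 1 when the PLL quirk applies */
		usb_amd_quirk_pll_disable();	/* keep the A-link out of low-power states */
}

static void my_hcd_iso_stream_stop(void)
{
	if (usb_amd_find_chipset_info())
		usb_amd_quirk_pll_enable();	/* allow A-link power management again */
}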
 421
 422void usb_amd_dev_put(void)
 423{
 424	struct pci_dev *nb, *smbus;
 425	unsigned long flags;
 426
 427	spin_lock_irqsave(&amd_lock, flags);
 428
 429	amd_chipset.probe_count--;
 430	if (amd_chipset.probe_count > 0) {
 431		spin_unlock_irqrestore(&amd_lock, flags);
 432		return;
 433	}
 434
 435	/* save them to pci_dev_put outside of spinlock */
 436	nb    = amd_chipset.nb_dev;
 437	smbus = amd_chipset.smbus_dev;
 438
 439	amd_chipset.nb_dev = NULL;
 440	amd_chipset.smbus_dev = NULL;
 441	amd_chipset.nb_type = 0;
 442	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
 443	amd_chipset.isoc_reqs = 0;
 444	amd_chipset.probe_result = 0;
 445
 446	spin_unlock_irqrestore(&amd_lock, flags);
 447
 448	pci_dev_put(nb);
 449	pci_dev_put(smbus);
 450}
 451EXPORT_SYMBOL_GPL(usb_amd_dev_put);
 452
 453/*
 454 * Make sure the controller is completely inactive, unable to
 455 * generate interrupts or do DMA.
 456 */
 457void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
 458{
 459	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
 460	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
 461	 */
 462	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
 463
 464	/* Reset the HC - this will force us to get a
 465	 * new notification of any already connected
 466	 * ports due to the virtual disconnect that it
 467	 * implies.
 468	 */
 469	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
 470	mb();
 471	udelay(5);
 472	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
 473		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
 474
 475	/* Just to be safe, disable interrupt requests and
 476	 * make sure the controller is stopped.
 477	 */
 478	outw(0, base + UHCI_USBINTR);
 479	outw(0, base + UHCI_USBCMD);
 480}
 481EXPORT_SYMBOL_GPL(uhci_reset_hc);
 482
 483/*
 484 * Initialize a controller that was newly discovered or has just been
 485 * resumed.  In either case we can't be sure of its previous state.
 486 *
 487 * Returns: 1 if the controller was reset, 0 otherwise.
 488 */
 489int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
 490{
 491	u16 legsup;
 492	unsigned int cmd, intr;
 493
 494	/*
 495	 * When restarting a suspended controller, we expect all the
 496	 * settings to be the same as we left them:
 497	 *
 498	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
 499	 *	Controller is stopped and configured with EGSM set;
 500	 *	No interrupts enabled except possibly Resume Detect.
 501	 *
 502	 * If any of these conditions are violated we do a complete reset.
 503	 */
 504	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
 505	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
 506		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
 507				__func__, legsup);
 508		goto reset_needed;
 509	}
 510
 511	cmd = inw(base + UHCI_USBCMD);
 512	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
 513			!(cmd & UHCI_USBCMD_EGSM)) {
 514		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
 515				__func__, cmd);
 516		goto reset_needed;
 517	}
 518
 519	intr = inw(base + UHCI_USBINTR);
 520	if (intr & (~UHCI_USBINTR_RESUME)) {
 521		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
 522				__func__, intr);
 523		goto reset_needed;
 524	}
 525	return 0;
 526
 527reset_needed:
 528	dev_dbg(&pdev->dev, "Performing full reset\n");
 529	uhci_reset_hc(pdev, base);
 530	return 1;
 531}
 532EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
 533
 534static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
 535{
 536	u16 cmd;
 537	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
 538}
 539
 540#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
 541#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
 542
 543static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
 544{
 545	unsigned long base = 0;
 546	int i;
 547
 548	if (!pio_enabled(pdev))
 549		return;
 550
 551	for (i = 0; i < PCI_ROM_RESOURCE; i++)
 552		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
 553			base = pci_resource_start(pdev, i);
 554			break;
 555		}
 556
 557	if (base)
 558		uhci_check_and_reset_hc(pdev, base);
 559}
 560
 561static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
 562{
 563	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
 564}
 565
 566static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
 567{
 568	void __iomem *base;
 569	u32 control;
 570	u32 fminterval = 0;
 571	bool no_fminterval = false;
 572	int cnt;
 573
 574	if (!mmio_resource_enabled(pdev, 0))
 575		return;
 576
 577	base = pci_ioremap_bar(pdev, 0);
 578	if (base == NULL)
 579		return;
 580
 581	/*
 582	 * ULi M5237 OHCI controller locks the whole system when accessing
 583	 * the OHCI_FMINTERVAL offset.
 584	 */
 585	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
 586		no_fminterval = true;
 587
 588	control = readl(base + OHCI_CONTROL);
 589
 590/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
 591#ifdef __hppa__
 592#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
 593#else
 594#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC
 595
 596	if (control & OHCI_CTRL_IR) {
 597		int wait_time = 500; /* arbitrary; 5 seconds */
 598		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
 599		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
 600		while (wait_time > 0 &&
 601				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
 602			wait_time -= 10;
 603			msleep(10);
 604		}
 605		if (wait_time <= 0)
 606			dev_warn(&pdev->dev,
 607				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 608				 readl(base + OHCI_CONTROL));
 609	}
 610#endif
 611
 612	/* disable interrupts */
 613	writel((u32) ~0, base + OHCI_INTRDISABLE);
 614
 615	/* Reset the USB bus, if the controller isn't already in RESET */
 616	if (control & OHCI_HCFS) {
 617		/* Go into RESET, preserving RWC (and possibly IR) */
 618		writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
 619		readl(base + OHCI_CONTROL);
 620
 621		/* drive bus reset for at least 50 ms (7.1.7.5) */
 622		msleep(50);
 623	}
 624
 625	/* software reset of the controller, preserving HcFmInterval */
 626	if (!no_fminterval)
 627		fminterval = readl(base + OHCI_FMINTERVAL);
 628
 629	writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 630
 631	/* reset requires max 10 us delay */
 632	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
 633		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
 634			break;
 635		udelay(1);
 636	}
 637
 638	if (!no_fminterval)
 639		writel(fminterval, base + OHCI_FMINTERVAL);
 640
 641	/* Now the controller is safely in SUSPEND and nothing can wake it up */
 642	iounmap(base);
 643}
 644
 645static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
 646	{
 647		/*  Pegatron Lucid (ExoPC) */
 648		.matches = {
 649			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
 650			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
 651		},
 652	},
 653	{
 654		/*  Pegatron Lucid (Ordissimo AIRIS) */
 655		.matches = {
 656			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
 657			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 658		},
 659	},
 660	{
 661		/*  Pegatron Lucid (Ordissimo) */
 662		.matches = {
 663			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
 664			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 665		},
 666	},
 667	{
 668		/* HASEE E200 */
 669		.matches = {
 670			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
 671			DMI_MATCH(DMI_BOARD_NAME, "E210"),
 672			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
 673		},
 674	},
 675	{ }
 676};
 677
 678static void ehci_bios_handoff(struct pci_dev *pdev,
 679					void __iomem *op_reg_base,
 680					u32 cap, u8 offset)
 681{
 682	int try_handoff = 1, tried_handoff = 0;
 683
 684	/*
 685	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
 686	 * the handoff on its unused controller.  Skip it.
 687	 *
 688	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
 689	 */
 690	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
 691			pdev->device == 0x27cc)) {
 692		if (dmi_check_system(ehci_dmi_nohandoff_table))
 693			try_handoff = 0;
 694	}
 695
 696	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
 697		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
 698
 699#if 0
 700/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 701 * but that seems dubious in general (the BIOS left it off intentionally)
 702 * and is known to prevent some systems from booting.  so we won't do this
 703 * unless maybe we can determine when we're on a system that needs SMI forced.
 704 */
 705		/* BIOS workaround (?): be sure the pre-Linux code
 706		 * receives the SMI
 707		 */
 708		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
 709		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
 710				       val | EHCI_USBLEGCTLSTS_SOOE);
 711#endif
 712
 713		/* some systems get upset if this semaphore is
 714		 * set for any other reason than forcing a BIOS
 715		 * handoff..
 716		 */
 717		pci_write_config_byte(pdev, offset + 3, 1);
 718	}
 719
 720	/* if boot firmware now owns EHCI, spin till it hands it over. */
 721	if (try_handoff) {
 722		int msec = 1000;
 723		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
 724			tried_handoff = 1;
 725			msleep(10);
 726			msec -= 10;
 727			pci_read_config_dword(pdev, offset, &cap);
 728		}
 729	}
 730
 731	if (cap & EHCI_USBLEGSUP_BIOS) {
 732		/* well, possibly buggy BIOS... try to shut it down,
 733		 * and hope nothing goes too wrong
 734		 */
 735		if (try_handoff)
 736			dev_warn(&pdev->dev,
 737				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 738				 cap);
 739		pci_write_config_byte(pdev, offset + 2, 0);
 740	}
 741
 742	/* just in case, always disable EHCI SMIs */
 743	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
 744
 745	/* If the BIOS ever owned the controller then we can't expect
 746	 * any power sessions to remain intact.
 747	 */
 748	if (tried_handoff)
 749		writel(0, op_reg_base + EHCI_CONFIGFLAG);
 750}
 751
 752static void quirk_usb_disable_ehci(struct pci_dev *pdev)
 753{
 754	void __iomem *base, *op_reg_base;
 755	u32	hcc_params, cap, val;
 756	u8	offset, cap_length;
 757	int	wait_time, count = 256/4;
 758
 759	if (!mmio_resource_enabled(pdev, 0))
 760		return;
 761
 762	base = pci_ioremap_bar(pdev, 0);
 763	if (base == NULL)
 764		return;
 765
 766	cap_length = readb(base);
 767	op_reg_base = base + cap_length;
 768
 769	/* EHCI 0.96 and later may have "extended capabilities"
 770	 * spec section 5.1 explains the bios handoff, e.g. for
 771	 * booting from USB disk or using a usb keyboard
 772	 */
 773	hcc_params = readl(base + EHCI_HCC_PARAMS);
 774	offset = (hcc_params >> 8) & 0xff;
 775	while (offset && --count) {
 776		pci_read_config_dword(pdev, offset, &cap);
 777
 778		switch (cap & 0xff) {
 779		case 1:
 780			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
 781			break;
 782		case 0: /* Illegal reserved cap, set cap=0 so we exit */
 783			cap = 0; /* then fallthrough... */
 784		default:
 785			dev_warn(&pdev->dev,
 786				 "EHCI: unrecognized capability %02x\n",
 787				 cap & 0xff);
 788		}
 789		offset = (cap >> 8) & 0xff;
 790	}
 791	if (!count)
 792		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
 793
 794	/*
 795	 * halt EHCI & disable its interrupts in any case
 796	 */
 797	val = readl(op_reg_base + EHCI_USBSTS);
 798	if ((val & EHCI_USBSTS_HALTED) == 0) {
 799		val = readl(op_reg_base + EHCI_USBCMD);
 800		val &= ~EHCI_USBCMD_RUN;
 801		writel(val, op_reg_base + EHCI_USBCMD);
 802
 803		wait_time = 2000;
 804		do {
 805			writel(0x3f, op_reg_base + EHCI_USBSTS);
 806			udelay(100);
 807			wait_time -= 100;
 808			val = readl(op_reg_base + EHCI_USBSTS);
 809			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
 810				break;
 811			}
 812		} while (wait_time > 0);
 813	}
 814	writel(0, op_reg_base + EHCI_USBINTR);
 815	writel(0x3f, op_reg_base + EHCI_USBSTS);
 816
 817	iounmap(base);
 818}
 819
 820/*
 821 * handshake - spin reading a register until handshake completes
 822 * @ptr: address of hc register to be read
 823 * @mask: bits to look at in result of read
 824 * @done: value of those bits when handshake succeeds
 825 * @wait_usec: timeout in microseconds
 826 * @delay_usec: delay in microseconds to wait between polling
 827 *
 828 * Polls a register every delay_usec microseconds.
 829 * Returns 0 when the mask bits have the value done.
 830 * Returns -ETIMEDOUT if this condition is not true after
 831 * wait_usec microseconds have passed.
 832 */
 833static int handshake(void __iomem *ptr, u32 mask, u32 done,
 834		int wait_usec, int delay_usec)
 835{
 836	u32	result;
 837
 838	do {
 839		result = readl(ptr);
 840		result &= mask;
 841		if (result == done)
 842			return 0;
 843		udelay(delay_usec);
 844		wait_usec -= delay_usec;
 845	} while (wait_usec > 0);
 846	return -ETIMEDOUT;
 847}
 848
 849/*
 850 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
 851 * share some number of ports.  These ports can be switched between either
 852 * controller.  Not all of the ports under the EHCI host controller may be
 853 * switchable.
 854 *
 855 * The ports should be switched over to xHCI before PCI probes for any device
 856 * start.  This avoids active devices under EHCI being disconnected during the
 857 * port switchover, which could cause loss of data on USB storage devices, or
 858 * failed boot when the root file system is on a USB mass storage device and is
 859 * enumerated under EHCI first.
 860 *
 861 * We write into the xHC's PCI configuration space in some Intel-specific
 862 * registers to switch the ports over.  The USB 3.0 terminations and the USB
 863 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
 864 * terminations before switching the USB 2.0 wires over, so that USB 3.0
 865 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
 866 */
 867void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
 868{
 869	u32		ports_available;
 870	bool		ehci_found = false;
 871	struct pci_dev	*companion = NULL;
 872
 873	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
 874	 * switching ports from EHCI to xHCI
 875	 */
 876	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
 877	    xhci_pdev->subsystem_device == 0x90a8)
 878		return;
 879
 880	/* make sure an intel EHCI controller exists */
 881	for_each_pci_dev(companion) {
 882		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
 883		    companion->vendor == PCI_VENDOR_ID_INTEL) {
 884			ehci_found = true;
 885			break;
 886		}
 887	}
 888
 889	if (!ehci_found)
 890		return;
 891
 892	/* Don't switch over the ports if the user hasn't compiled the xHCI
 893	 * driver.  Otherwise they will see "dead" USB ports that don't power
 894	 * the devices.
 895	 */
 896	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
 897		dev_warn(&xhci_pdev->dev,
 898			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
 899		dev_warn(&xhci_pdev->dev,
 900				"USB 3.0 devices will work at USB 2.0 speeds.\n");
 901		usb_disable_xhci_ports(xhci_pdev);
 902		return;
 903	}
 904
 905	/* Read USB3PRM, the USB 3.0 Port Routing Mask Register, which
 906	 * indicates the ports that the OS is allowed to switch.
 907	 */
 908	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
 909			&ports_available);
 910
 911	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
 912			ports_available);
 913
 914	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
 915	 * Register, to turn on SuperSpeed terminations for the
 916	 * switchable ports.
 917	 */
 918	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
 919			ports_available);
 920
 921	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
 922			&ports_available);
 923	dev_dbg(&xhci_pdev->dev,
 924		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
 925		ports_available);
 926
 927	/* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register, which
 928	 * indicates the USB 2.0 ports to be controlled by the xHCI host.
 929	 */
 930
 931	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
 932			&ports_available);
 933
 934	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
 935			ports_available);
 936
 937	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
 938	 * switch the USB 2.0 power and data lines over to the xHCI
 939	 * host.
 940	 */
 941	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
 942			ports_available);
 943
 944	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
 945			&ports_available);
 946	dev_dbg(&xhci_pdev->dev,
 947		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
 948		ports_available);
 949}
 950EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
 951
 952void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
 953{
 954	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
 955	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
 956}
 957EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
 958
 959/**
 960 * PCI Quirks for xHCI.
 961 *
 962 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
 963 * It signals to the BIOS that the OS wants control of the host controller,
 964 * and then waits 5 seconds for the BIOS to hand over control.
 965 * If we timeout, assume the BIOS is broken and take control anyway.
 966 */
 967static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
 968{
 969	void __iomem *base;
 970	int ext_cap_offset;
 971	void __iomem *op_reg_base;
 972	u32 val;
 973	int timeout;
 974	int len = pci_resource_len(pdev, 0);
 975
 976	if (!mmio_resource_enabled(pdev, 0))
 977		return;
 978
 979	base = ioremap_nocache(pci_resource_start(pdev, 0), len);
 980	if (base == NULL)
 981		return;
 982
 983	/*
 984	 * Find the Legacy Support Capability register -
 985	 * this is optional for xHCI host controllers.
 986	 */
 987	ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);
 988
 989	if (!ext_cap_offset)
 990		goto hc_init;
 991
 992	if ((ext_cap_offset + sizeof(val)) > len) {
 993		/* We're reading garbage from the controller */
 994		dev_warn(&pdev->dev, "xHCI controller failing to respond");
 995		goto iounmap;
 996	}
 997	val = readl(base + ext_cap_offset);
 998
 999	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
1000	if (val & XHCI_HC_BIOS_OWNED) {
1001		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
1002
1003		/* Wait for 5 seconds with 10 microsecond polling interval */
1004		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
1005				0, 5000, 10);
1006
1007		/* Assume a buggy BIOS and take HC ownership anyway */
1008		if (timeout) {
1009			dev_warn(&pdev->dev,
1010				 "xHCI BIOS handoff failed (BIOS bug ?) %08x\n",
1011				 val);
1012			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
1013		}
1014	}
1015
1016	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1017	/* Mask off (turn off) any enabled SMIs */
1018	val &= XHCI_LEGACY_DISABLE_SMI;
1019	/* Mask all SMI events bits, RW1C */
1020	val |= XHCI_LEGACY_SMI_EVENTS;
1021	/* Disable any BIOS SMIs and clear all SMI events*/
1022	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1023
1024hc_init:
1025	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
1026		usb_enable_intel_xhci_ports(pdev);
1027
1028	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
1029
1030	/* Wait for the host controller to be ready before writing any
1031	 * operational or runtime registers.  Wait 5 seconds and no more.
1032	 */
1033	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
1034			5000, 10);
1035	/* Assume a buggy HC and start HC initialization anyway */
1036	if (timeout) {
1037		val = readl(op_reg_base + XHCI_STS_OFFSET);
1038		dev_warn(&pdev->dev,
1039			 "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
1040			 val);
1041	}
1042
1043	/* Send the halt and disable interrupts command */
1044	val = readl(op_reg_base + XHCI_CMD_OFFSET);
1045	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
1046	writel(val, op_reg_base + XHCI_CMD_OFFSET);
1047
1048	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
1049	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
1050			XHCI_MAX_HALT_USEC, 125);
1051	if (timeout) {
1052		val = readl(op_reg_base + XHCI_STS_OFFSET);
1053		dev_warn(&pdev->dev,
1054			 "xHCI HW did not halt within %d usec status = 0x%x\n",
1055			 XHCI_MAX_HALT_USEC, val);
1056	}
1057
1058iounmap:
1059	iounmap(base);
1060}
1061
1062static void quirk_usb_early_handoff(struct pci_dev *pdev)
1063{
1064	/* Skip Netlogic mips SoC's internal PCI USB controller.
1065	 * This device does not need/support EHCI/OHCI handoff
1066	 */
1067	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
1068		return;
1069	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
1070			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
1071			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
1072			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
1073		return;
1074
1075	if (pci_enable_device(pdev) < 0) {
1076		dev_warn(&pdev->dev,
1077			 "Can't enable PCI device, BIOS handoff failed.\n");
1078		return;
1079	}
1080	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
1081		quirk_usb_handoff_uhci(pdev);
1082	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
1083		quirk_usb_handoff_ohci(pdev);
1084	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
1085		quirk_usb_disable_ehci(pdev);
1086	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
1087		quirk_usb_handoff_xhci(pdev);
1088	pci_disable_device(pdev);
1089}
1090DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1091			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
drivers/usb/host/pci-quirks.c (Linux v6.2)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * This file contains code to reset and initialize USB host controllers.
   4 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
   5 * It may need to run early during booting -- before USB would normally
   6 * initialize -- to ensure that Linux doesn't use any legacy modes.
   7 *
   8 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
   9 *  (and others)
  10 */
  11
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/pci.h>
  15#include <linux/delay.h>
  16#include <linux/export.h>
  17#include <linux/acpi.h>
  18#include <linux/dmi.h>
  19#include <linux/of.h>
  20#include <linux/iopoll.h>
  21
  22#include "pci-quirks.h"
  23#include "xhci-ext-caps.h"
  24
  25
  26#define UHCI_USBLEGSUP		0xc0		/* legacy support */
  27#define UHCI_USBCMD		0		/* command register */
  28#define UHCI_USBINTR		4		/* interrupt register */
  29#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
  30#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
  31#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
  32#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
  33#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
  34#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
  35#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */
  36
  37#define OHCI_CONTROL		0x04
  38#define OHCI_CMDSTATUS		0x08
  39#define OHCI_INTRSTATUS		0x0c
  40#define OHCI_INTRENABLE		0x10
  41#define OHCI_INTRDISABLE	0x14
  42#define OHCI_FMINTERVAL		0x34
  43#define OHCI_HCFS		(3 << 6)	/* hc functional state */
  44#define OHCI_HCR		(1 << 0)	/* host controller reset */
  45#define OHCI_OCR		(1 << 3)	/* ownership change request */
  46#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
  47#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
  48#define OHCI_INTR_OC		(1 << 30)	/* ownership change */
  49
  50#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
  51#define EHCI_USBCMD		0		/* command register */
  52#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
  53#define EHCI_USBSTS		4		/* status register */
  54#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
  55#define EHCI_USBINTR		8		/* interrupt register */
  56#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
  57#define EHCI_USBLEGSUP		0		/* legacy support register */
  58#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
  59#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
  60#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
  61#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */
  62
  63/* AMD quirk use */
  64#define	AB_REG_BAR_LOW		0xe0
  65#define	AB_REG_BAR_HIGH		0xe1
  66#define	AB_REG_BAR_SB700	0xf0
  67#define	AB_INDX(addr)		((addr) + 0x00)
  68#define	AB_DATA(addr)		((addr) + 0x04)
  69#define	AX_INDXC		0x30
  70#define	AX_DATAC		0x34
  71
  72#define PT_ADDR_INDX		0xE8
  73#define PT_READ_INDX		0xE4
  74#define PT_SIG_1_ADDR		0xA520
  75#define PT_SIG_2_ADDR		0xA521
  76#define PT_SIG_3_ADDR		0xA522
  77#define PT_SIG_4_ADDR		0xA523
  78#define PT_SIG_1_DATA		0x78
  79#define PT_SIG_2_DATA		0x56
  80#define PT_SIG_3_DATA		0x34
  81#define PT_SIG_4_DATA		0x12
  82#define PT4_P1_REG		0xB521
  83#define PT4_P2_REG		0xB522
  84#define PT2_P1_REG		0xD520
  85#define PT2_P2_REG		0xD521
  86#define PT1_P1_REG		0xD522
  87#define PT1_P2_REG		0xD523
  88
  89#define	NB_PCIE_INDX_ADDR	0xe0
  90#define	NB_PCIE_INDX_DATA	0xe4
  91#define	PCIE_P_CNTL		0x10040
  92#define	BIF_NB			0x10002
  93#define	NB_PIF0_PWRDOWN_0	0x01100012
  94#define	NB_PIF0_PWRDOWN_1	0x01100013
  95
  96#define USB_INTEL_XUSB2PR      0xD0
  97#define USB_INTEL_USB2PRM      0xD4
  98#define USB_INTEL_USB3_PSSEN   0xD8
  99#define USB_INTEL_USB3PRM      0xDC
 100
 101/* ASMEDIA quirk use */
 102#define ASMT_DATA_WRITE0_REG	0xF8
 103#define ASMT_DATA_WRITE1_REG	0xFC
 104#define ASMT_CONTROL_REG	0xE0
 105#define ASMT_CONTROL_WRITE_BIT	0x02
 106#define ASMT_WRITEREG_CMD	0x10423
 107#define ASMT_FLOWCTL_ADDR	0xFA30
 108#define ASMT_FLOWCTL_DATA	0xBA
 109#define ASMT_PSEUDO_DATA	0
 110
 111/*
 112 * amd_chipset_gen values represent the different AMD chipset generations
 113 */
 114enum amd_chipset_gen {
 115	NOT_AMD_CHIPSET = 0,
 116	AMD_CHIPSET_SB600,
 117	AMD_CHIPSET_SB700,
 118	AMD_CHIPSET_SB800,
 119	AMD_CHIPSET_HUDSON2,
 120	AMD_CHIPSET_BOLTON,
 121	AMD_CHIPSET_YANGTZE,
 122	AMD_CHIPSET_TAISHAN,
 123	AMD_CHIPSET_UNKNOWN,
 124};
 125
 126struct amd_chipset_type {
 127	enum amd_chipset_gen gen;
 128	u8 rev;
 129};
 130
 131static struct amd_chipset_info {
 132	struct pci_dev	*nb_dev;
 133	struct pci_dev	*smbus_dev;
 134	int nb_type;
 135	struct amd_chipset_type sb_type;
 136	int isoc_reqs;
 137	int probe_count;
 138	bool need_pll_quirk;
 139} amd_chipset;
 140
 141static DEFINE_SPINLOCK(amd_lock);
 142
 143/*
 144 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
 145 *
 146 * AMD FCH/SB generation and revision is identified by SMBus controller
 147 * vendor, device and revision IDs.
 148 *
 149 * Returns: 1 if it is an AMD chipset, 0 otherwise.
 150 */
 151static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
 152{
 153	u8 rev = 0;
 154	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
 155
 156	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
 157			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
 158	if (pinfo->smbus_dev) {
 159		rev = pinfo->smbus_dev->revision;
 160		if (rev >= 0x10 && rev <= 0x1f)
 161			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
 162		else if (rev >= 0x30 && rev <= 0x3f)
 163			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
 164		else if (rev >= 0x40 && rev <= 0x4f)
 165			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
 166	} else {
 167		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 168				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
 169
 170		if (pinfo->smbus_dev) {
 171			rev = pinfo->smbus_dev->revision;
 172			if (rev >= 0x11 && rev <= 0x14)
 173				pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
 174			else if (rev >= 0x15 && rev <= 0x18)
 175				pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
 176			else if (rev >= 0x39 && rev <= 0x3a)
 177				pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
 178		} else {
 179			pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 180							  0x145c, NULL);
 181			if (pinfo->smbus_dev) {
 182				rev = pinfo->smbus_dev->revision;
 183				pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
 184			} else {
 185				pinfo->sb_type.gen = NOT_AMD_CHIPSET;
 186				return 0;
 187			}
 188		}
 189	}
 190	pinfo->sb_type.rev = rev;
 191	return 1;
 192}
 193
 194void sb800_prefetch(struct device *dev, int on)
 195{
 196	u16 misc;
 197	struct pci_dev *pdev = to_pci_dev(dev);
 198
 199	pci_read_config_word(pdev, 0x50, &misc);
 200	if (on == 0)
 201		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
 202	else
 203		pci_write_config_word(pdev, 0x50, misc | 0x0300);
 204}
 205EXPORT_SYMBOL_GPL(sb800_prefetch);
 206
 207static void usb_amd_find_chipset_info(void)
 208{
 209	unsigned long flags;
 210	struct amd_chipset_info info;
 211	info.need_pll_quirk = false;
 212
 213	spin_lock_irqsave(&amd_lock, flags);
 214
 215	/* probe only once */
 216	if (amd_chipset.probe_count > 0) {
 217		amd_chipset.probe_count++;
 218		spin_unlock_irqrestore(&amd_lock, flags);
 219		return;
 220	}
 221	memset(&info, 0, sizeof(info));
 222	spin_unlock_irqrestore(&amd_lock, flags);
 223
 224	if (!amd_chipset_sb_type_init(&info)) {
 225		goto commit;
 226	}
 227
 228	switch (info.sb_type.gen) {
 229	case AMD_CHIPSET_SB700:
 230		info.need_pll_quirk = info.sb_type.rev <= 0x3B;
 231		break;
 232	case AMD_CHIPSET_SB800:
 233	case AMD_CHIPSET_HUDSON2:
 234	case AMD_CHIPSET_BOLTON:
 235		info.need_pll_quirk = true;
 236		break;
 237	default:
 238		info.need_pll_quirk = false;
 239		break;
 240	}
 241
 242	if (!info.need_pll_quirk) {
 243		if (info.smbus_dev) {
 244			pci_dev_put(info.smbus_dev);
 245			info.smbus_dev = NULL;
 246		}
 247		goto commit;
 248	}
 249
 250	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
 251	if (info.nb_dev) {
 252		info.nb_type = 1;
 253	} else {
 254		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
 255		if (info.nb_dev) {
 256			info.nb_type = 2;
 257		} else {
 258			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 259						     0x9600, NULL);
 260			if (info.nb_dev)
 261				info.nb_type = 3;
 262		}
 263	}
 264
 265	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 266
 267commit:
 268
 269	spin_lock_irqsave(&amd_lock, flags);
 270	if (amd_chipset.probe_count > 0) {
 271		/* race - someone else was faster - drop devices */
 272
 273		/* Mark that we were here */
 274		amd_chipset.probe_count++;
 275
 276		spin_unlock_irqrestore(&amd_lock, flags);
 277
 278		pci_dev_put(info.nb_dev);
 279		pci_dev_put(info.smbus_dev);
 280
 281	} else {
 282		/* no race - commit the result */
 283		info.probe_count++;
 284		amd_chipset = info;
 285		spin_unlock_irqrestore(&amd_lock, flags);
 286	}
 287}
 288
 289int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
 290{
 291	/* Make sure amd chipset type has already been initialized */
 292	usb_amd_find_chipset_info();
 293	if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
 294	    amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
 295		dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
 296		return 1;
 297	}
 298	return 0;
 299}
 300EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
 301
 302bool usb_amd_hang_symptom_quirk(void)
 303{
 304	u8 rev;
 305
 306	usb_amd_find_chipset_info();
 307	rev = amd_chipset.sb_type.rev;
 308	/* SB600 and old version of SB700 have hang symptom bug */
 309	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
 310			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 311			 rev >= 0x3a && rev <= 0x3b);
 312}
 313EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
 314
 315bool usb_amd_prefetch_quirk(void)
 316{
 317	usb_amd_find_chipset_info();
 318	/* SB800 needs pre-fetch fix */
 319	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
 320}
 321EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
 322
 323bool usb_amd_quirk_pll_check(void)
 324{
 325	usb_amd_find_chipset_info();
 326	return amd_chipset.need_pll_quirk;
 327}
 328EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check);
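Compared with the v4.6 listing above, usb_amd_find_chipset_info() is now static and returns nothing; the PLL decision is exported through usb_amd_quirk_pll_check() instead. A hedged sketch of the consumer side; struct xhci_hcd and the XHCI_AMD_PLL_FIX flag belong to the xHCI driver and are recalled from memory here, so treat them as assumptions:

static void my_xhci_apply_amd_quirks(struct xhci_hcd *xhci)
{
	if (usb_amd_quirk_pll_check())		/* chipset needs the A-link PLL quirk */
		xhci->quirks |= XHCI_AMD_PLL_FIX;
}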
 329
 330/*
 331 * The hardware normally enables the A-link power management feature, which
 332 * lets the system lower the power consumption in idle states.
 333 *
 334 * This USB quirk prevents the link going into that lower power state
 335 * during isochronous transfers.
 336 *
 337 * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of
 338 * some AMD platforms may stutter or have breaks occasionally.
 339 */
 340static void usb_amd_quirk_pll(int disable)
 341{
 342	u32 addr, addr_low, addr_high, val;
 343	u32 bit = disable ? 0 : 1;
 344	unsigned long flags;
 345
 346	spin_lock_irqsave(&amd_lock, flags);
 347
 348	if (disable) {
 349		amd_chipset.isoc_reqs++;
 350		if (amd_chipset.isoc_reqs > 1) {
 351			spin_unlock_irqrestore(&amd_lock, flags);
 352			return;
 353		}
 354	} else {
 355		amd_chipset.isoc_reqs--;
 356		if (amd_chipset.isoc_reqs > 0) {
 357			spin_unlock_irqrestore(&amd_lock, flags);
 358			return;
 359		}
 360	}
 361
 362	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
 363			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
 364			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
 365		outb_p(AB_REG_BAR_LOW, 0xcd6);
 366		addr_low = inb_p(0xcd7);
 367		outb_p(AB_REG_BAR_HIGH, 0xcd6);
 368		addr_high = inb_p(0xcd7);
 369		addr = addr_high << 8 | addr_low;
 370
 371		outl_p(0x30, AB_INDX(addr));
 372		outl_p(0x40, AB_DATA(addr));
 373		outl_p(0x34, AB_INDX(addr));
 374		val = inl_p(AB_DATA(addr));
 375	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
 376			amd_chipset.sb_type.rev <= 0x3b) {
 377		pci_read_config_dword(amd_chipset.smbus_dev,
 378					AB_REG_BAR_SB700, &addr);
 379		outl(AX_INDXC, AB_INDX(addr));
 380		outl(0x40, AB_DATA(addr));
 381		outl(AX_DATAC, AB_INDX(addr));
 382		val = inl(AB_DATA(addr));
 383	} else {
 384		spin_unlock_irqrestore(&amd_lock, flags);
 385		return;
 386	}
 387
 388	if (disable) {
 389		val &= ~0x08;
 390		val |= (1 << 4) | (1 << 9);
 391	} else {
 392		val |= 0x08;
 393		val &= ~((1 << 4) | (1 << 9));
 394	}
 395	outl_p(val, AB_DATA(addr));
 396
 397	if (!amd_chipset.nb_dev) {
 398		spin_unlock_irqrestore(&amd_lock, flags);
 399		return;
 400	}
 401
 402	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
 403		addr = PCIE_P_CNTL;
 404		pci_write_config_dword(amd_chipset.nb_dev,
 405					NB_PCIE_INDX_ADDR, addr);
 406		pci_read_config_dword(amd_chipset.nb_dev,
 407					NB_PCIE_INDX_DATA, &val);
 408
 409		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
 410		val |= bit | (bit << 3) | (bit << 12);
 411		val |= ((!bit) << 4) | ((!bit) << 9);
 412		pci_write_config_dword(amd_chipset.nb_dev,
 413					NB_PCIE_INDX_DATA, val);
 414
 415		addr = BIF_NB;
 416		pci_write_config_dword(amd_chipset.nb_dev,
 417					NB_PCIE_INDX_ADDR, addr);
 418		pci_read_config_dword(amd_chipset.nb_dev,
 419					NB_PCIE_INDX_DATA, &val);
 420		val &= ~(1 << 8);
 421		val |= bit << 8;
 422
 423		pci_write_config_dword(amd_chipset.nb_dev,
 424					NB_PCIE_INDX_DATA, val);
 425	} else if (amd_chipset.nb_type == 2) {
 426		addr = NB_PIF0_PWRDOWN_0;
 427		pci_write_config_dword(amd_chipset.nb_dev,
 428					NB_PCIE_INDX_ADDR, addr);
 429		pci_read_config_dword(amd_chipset.nb_dev,
 430					NB_PCIE_INDX_DATA, &val);
 431		if (disable)
 432			val &= ~(0x3f << 7);
 433		else
 434			val |= 0x3f << 7;
 435
 436		pci_write_config_dword(amd_chipset.nb_dev,
 437					NB_PCIE_INDX_DATA, val);
 438
 439		addr = NB_PIF0_PWRDOWN_1;
 440		pci_write_config_dword(amd_chipset.nb_dev,
 441					NB_PCIE_INDX_ADDR, addr);
 442		pci_read_config_dword(amd_chipset.nb_dev,
 443					NB_PCIE_INDX_DATA, &val);
 444		if (disable)
 445			val &= ~(0x3f << 7);
 446		else
 447			val |= 0x3f << 7;
 448
 449		pci_write_config_dword(amd_chipset.nb_dev,
 450					NB_PCIE_INDX_DATA, val);
 451	}
 452
 453	spin_unlock_irqrestore(&amd_lock, flags);
 454	return;
 455}
 456
 457void usb_amd_quirk_pll_disable(void)
 458{
 459	usb_amd_quirk_pll(1);
 460}
 461EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
 462
 463static int usb_asmedia_wait_write(struct pci_dev *pdev)
 464{
 465	unsigned long retry_count;
 466	unsigned char value;
 467
 468	for (retry_count = 1000; retry_count > 0; --retry_count) {
 469
 470		pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
 471
 472		if (value == 0xff) {
 473			dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
 474			return -EIO;
 475		}
 476
 477		if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
 478			return 0;
 479
 480		udelay(50);
 481	}
 482
 483	dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
 484	return -ETIMEDOUT;
 485}
 486
 487void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
 488{
 489	if (usb_asmedia_wait_write(pdev) != 0)
 490		return;
 491
 492	/* send command and address to device */
 493	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
 494	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
 495	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
 496
 497	if (usb_asmedia_wait_write(pdev) != 0)
 498		return;
 499
 500	/* send data to device */
 501	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
 502	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
 503	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
 504}
 505EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
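A hedged sketch of how the flow-control write-back above might be triggered from the xHCI side; the quirk-flag name is recalled from the xHCI driver and the hook is invented, so treat both as assumptions:

static void my_xhci_asmedia_fixup(struct usb_hcd *hcd, u64 quirks)
{
	if (quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
}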
 506
 507void usb_amd_quirk_pll_enable(void)
 508{
 509	usb_amd_quirk_pll(0);
 510}
 511EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
 512
 513void usb_amd_dev_put(void)
 514{
 515	struct pci_dev *nb, *smbus;
 516	unsigned long flags;
 517
 518	spin_lock_irqsave(&amd_lock, flags);
 519
 520	amd_chipset.probe_count--;
 521	if (amd_chipset.probe_count > 0) {
 522		spin_unlock_irqrestore(&amd_lock, flags);
 523		return;
 524	}
 525
 526	/* save them to pci_dev_put outside of spinlock */
 527	nb    = amd_chipset.nb_dev;
 528	smbus = amd_chipset.smbus_dev;
 529
 530	amd_chipset.nb_dev = NULL;
 531	amd_chipset.smbus_dev = NULL;
 532	amd_chipset.nb_type = 0;
 533	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
 534	amd_chipset.isoc_reqs = 0;
 535	amd_chipset.need_pll_quirk = false;
 536
 537	spin_unlock_irqrestore(&amd_lock, flags);
 538
 539	pci_dev_put(nb);
 540	pci_dev_put(smbus);
 541}
 542EXPORT_SYMBOL_GPL(usb_amd_dev_put);
 543
 544/*
 545 * Check if port is disabled in BIOS on AMD Promontory host.
 546 * BIOS Disabled ports may wake on connect/disconnect and need
 547 * driver workaround to keep them disabled.
 548 * Returns true if port is marked disabled.
 549 */
 550bool usb_amd_pt_check_port(struct device *device, int port)
 551{
 552	unsigned char value, port_shift;
 553	struct pci_dev *pdev;
 554	u16 reg;
 555
 556	pdev = to_pci_dev(device);
 557	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);
 558
 559	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 560	if (value != PT_SIG_1_DATA)
 561		return false;
 562
 563	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);
 564
 565	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 566	if (value != PT_SIG_2_DATA)
 567		return false;
 568
 569	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);
 570
 571	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 572	if (value != PT_SIG_3_DATA)
 573		return false;
 574
 575	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);
 576
 577	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 578	if (value != PT_SIG_4_DATA)
 579		return false;
 580
 581	/* Check disabled port setting, if bit is set port is enabled */
 582	switch (pdev->device) {
 583	case 0x43b9:
 584	case 0x43ba:
 585	/*
 586	 * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
 587	 * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0
 588	 * PT4_P2_REG bits[6..0] represents ports 13 to 7
 589	 */
 590		if (port > 6) {
 591			reg = PT4_P2_REG;
 592			port_shift = port - 7;
 593		} else {
 594			reg = PT4_P1_REG;
 595			port_shift = port + 1;
 596		}
 597		break;
 598	case 0x43bb:
 599	/*
 600	 * device is AMD_PROMONTORYA_2(0x43bb)
 601	 * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0
 602	 * PT2_P2_REG bits[5..0] represents ports 9 to 3
 603	 */
 604		if (port > 2) {
 605			reg = PT2_P2_REG;
 606			port_shift = port - 3;
 607		} else {
 608			reg = PT2_P1_REG;
 609			port_shift = port + 5;
 610		}
 611		break;
 612	case 0x43bc:
 613	/*
 614	 * device is AMD_PROMONTORYA_1(0x43bc)
 615	 * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0
 616	 * PT1_P2_REG[5..0] represents ports 9 to 4
 617	 */
 618		if (port > 3) {
 619			reg = PT1_P2_REG;
 620			port_shift = port - 4;
 621		} else {
 622			reg = PT1_P1_REG;
 623			port_shift = port + 4;
 624		}
 625		break;
 626	default:
 627		return false;
 628	}
 629	pci_write_config_word(pdev, PT_ADDR_INDX, reg);
 630	pci_read_config_byte(pdev, PT_READ_INDX, &value);
 631
 632	return !(value & BIT(port_shift));
 633}
 634EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
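Consumer-side sketch for the port check above: a hub-status path can ask whether the BIOS left a USB 2.0 port disabled and then keep it that way. The wrapper below is invented for illustration; only usb_amd_pt_check_port() is real:

static bool port_disabled_by_bios(struct usb_hcd *hcd, int port)
{
	/* true when the Promontory BIOS marked this port disabled */
	return usb_amd_pt_check_port(hcd->self.controller, port);
}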
 635
 636/*
 637 * Make sure the controller is completely inactive, unable to
 638 * generate interrupts or do DMA.
 639 */
 640void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
 641{
 642	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
 643	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
 644	 */
 645	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
 646
 647	/* Reset the HC - this will force us to get a
 648	 * new notification of any already connected
 649	 * ports due to the virtual disconnect that it
 650	 * implies.
 651	 */
 652	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
 653	mb();
 654	udelay(5);
 655	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
 656		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
 657
 658	/* Just to be safe, disable interrupt requests and
 659	 * make sure the controller is stopped.
 660	 */
 661	outw(0, base + UHCI_USBINTR);
 662	outw(0, base + UHCI_USBCMD);
 663}
 664EXPORT_SYMBOL_GPL(uhci_reset_hc);
 665
 666/*
 667 * Initialize a controller that was newly discovered or has just been
 668 * resumed.  In either case we can't be sure of its previous state.
 669 *
 670 * Returns: 1 if the controller was reset, 0 otherwise.
 671 */
 672int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
 673{
 674	u16 legsup;
 675	unsigned int cmd, intr;
 676
 677	/*
 678	 * When restarting a suspended controller, we expect all the
 679	 * settings to be the same as we left them:
 680	 *
 681	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
 682	 *	Controller is stopped and configured with EGSM set;
 683	 *	No interrupts enabled except possibly Resume Detect.
 684	 *
 685	 * If any of these conditions are violated we do a complete reset.
 686	 */
 687	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
 688	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
 689		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
 690				__func__, legsup);
 691		goto reset_needed;
 692	}
 693
 694	cmd = inw(base + UHCI_USBCMD);
 695	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
 696			!(cmd & UHCI_USBCMD_EGSM)) {
 697		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
 698				__func__, cmd);
 699		goto reset_needed;
 700	}
 701
 702	intr = inw(base + UHCI_USBINTR);
 703	if (intr & (~UHCI_USBINTR_RESUME)) {
 704		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
 705				__func__, intr);
 706		goto reset_needed;
 707	}
 708	return 0;
 709
 710reset_needed:
 711	dev_dbg(&pdev->dev, "Performing full reset\n");
 712	uhci_reset_hc(pdev, base);
 713	return 1;
 714}
 715EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
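
/*
 * Illustrative sketch, not part of this file: how a UHCI host driver might
 * use the two exported helpers above.  uhci_reset_hc() resets the hardware
 * unconditionally, while uhci_check_and_reset_hc() only resets when the
 * saved state was disturbed and reports whether it did.  The function below
 * is hypothetical.
 */
static void example_uhci_resume_quirk(struct pci_dev *pdev,
				      unsigned long io_base)
{
	if (uhci_check_and_reset_hc(pdev, io_base))
		dev_dbg(&pdev->dev,
			"controller state was lost; full reset performed\n");
}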
 716
 717static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
 718{
 719	u16 cmd;
 720	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
 721}
 722
 723#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
 724#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
 725
 726static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
 727{
 728	unsigned long base = 0;
 729	int i;
 730
 731	if (!pio_enabled(pdev))
 732		return;
 733
 734	for (i = 0; i < PCI_STD_NUM_BARS; i++)
 735		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
 736			base = pci_resource_start(pdev, i);
 737			break;
 738		}
 739
 740	if (base)
 741		uhci_check_and_reset_hc(pdev, base);
 742}
 743
 744static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
 745{
 746	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
 747}
 748
 749static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
 750{
 751	void __iomem *base;
 752	u32 control;
 753	u32 fminterval = 0;
 754	bool no_fminterval = false;
 755	int cnt;
 756
 757	if (!mmio_resource_enabled(pdev, 0))
 758		return;
 759
 760	base = pci_ioremap_bar(pdev, 0);
 761	if (base == NULL)
 762		return;
 763
 764	/*
 765	 * ULi M5237 OHCI controller locks the whole system when accessing
 766	 * the OHCI_FMINTERVAL offset.
 767	 */
 768	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
 769		no_fminterval = true;
 770
 771	control = readl(base + OHCI_CONTROL);
 772
 773/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
 774#ifdef __hppa__
 775#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
 776#else
 777#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC
 778
 779	if (control & OHCI_CTRL_IR) {
  780		int wait_time = 500; /* arbitrary; ~500 ms in 10 ms steps */
 781		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
 782		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
 783		while (wait_time > 0 &&
 784				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
 785			wait_time -= 10;
 786			msleep(10);
 787		}
 788		if (wait_time <= 0)
 789			dev_warn(&pdev->dev,
 790				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 791				 readl(base + OHCI_CONTROL));
 792	}
 793#endif
 794
 795	/* disable interrupts */
 796	writel((u32) ~0, base + OHCI_INTRDISABLE);
 797
 798	/* Go into the USB_RESET state, preserving RWC (and possibly IR) */
 799	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
  800	readl(base + OHCI_CONTROL);
  801
 802	/* software reset of the controller, preserving HcFmInterval */
 803	if (!no_fminterval)
 804		fminterval = readl(base + OHCI_FMINTERVAL);
 805
 806	writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 807
 808	/* reset requires max 10 us delay */
 809	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
 810		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
 811			break;
 812		udelay(1);
 813	}
 814
 815	if (!no_fminterval)
 816		writel(fminterval, base + OHCI_FMINTERVAL);
 817
 818	/* Now the controller is safely in SUSPEND and nothing can wake it up */
 819	iounmap(base);
 820}
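
/*
 * Illustrative sketch: decoding the HcControl functional-state field
 * (OHCI_HCFS, bits 7:6) that the hand-off above forces back to USBRESET
 * by clearing it.  The state encodings follow the OHCI specification;
 * the helper itself is hypothetical and only used for illustration.
 */
static const char *example_ohci_hcfs_name(u32 control)
{
	switch ((control & OHCI_HCFS) >> 6) {
	case 0:
		return "USBRESET";
	case 1:
		return "USBRESUME";
	case 2:
		return "USBOPERATIONAL";
	default:
		return "USBSUSPEND";
	}
}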
 821
 822static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
 823	{
 824		/*  Pegatron Lucid (ExoPC) */
 825		.matches = {
 826			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
 827			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
 828		},
 829	},
 830	{
 831		/*  Pegatron Lucid (Ordissimo AIRIS) */
 832		.matches = {
 833			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
 834			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 835		},
 836	},
 837	{
 838		/*  Pegatron Lucid (Ordissimo) */
 839		.matches = {
 840			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
 841			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
 842		},
 843	},
 844	{
 845		/* HASEE E200 */
 846		.matches = {
 847			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
 848			DMI_MATCH(DMI_BOARD_NAME, "E210"),
 849			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
 850		},
 851	},
 852	{ }
 853};
 854
 855static void ehci_bios_handoff(struct pci_dev *pdev,
 856					void __iomem *op_reg_base,
 857					u32 cap, u8 offset)
 858{
 859	int try_handoff = 1, tried_handoff = 0;
 860
 861	/*
 862	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
 863	 * the handoff on its unused controller.  Skip it.
 864	 *
 865	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
 866	 */
 867	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
 868			pdev->device == 0x27cc)) {
 869		if (dmi_check_system(ehci_dmi_nohandoff_table))
 870			try_handoff = 0;
 871	}
 872
 873	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
 874		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
 875
 876#if 0
 877/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 878 * but that seems dubious in general (the BIOS left it off intentionally)
 879 * and is known to prevent some systems from booting.  so we won't do this
 880 * unless maybe we can determine when we're on a system that needs SMI forced.
 881 */
 882		/* BIOS workaround (?): be sure the pre-Linux code
 883		 * receives the SMI
 884		 */
 885		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
 886		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
 887				       val | EHCI_USBLEGCTLSTS_SOOE);
 888#endif
 889
 890		/* some systems get upset if this semaphore is
 891		 * set for any other reason than forcing a BIOS
 892		 * handoff..
 893		 */
 894		pci_write_config_byte(pdev, offset + 3, 1);
 895	}
 896
 897	/* if boot firmware now owns EHCI, spin till it hands it over. */
 898	if (try_handoff) {
 899		int msec = 1000;
 900		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
 901			tried_handoff = 1;
 902			msleep(10);
 903			msec -= 10;
 904			pci_read_config_dword(pdev, offset, &cap);
 905		}
 906	}
 907
 908	if (cap & EHCI_USBLEGSUP_BIOS) {
 909		/* well, possibly buggy BIOS... try to shut it down,
 910		 * and hope nothing goes too wrong
 911		 */
 912		if (try_handoff)
 913			dev_warn(&pdev->dev,
 914				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
 915				 cap);
 916		pci_write_config_byte(pdev, offset + 2, 0);
 917	}
 918
 919	/* just in case, always disable EHCI SMIs */
 920	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
 921
 922	/* If the BIOS ever owned the controller then we can't expect
 923	 * any power sessions to remain intact.
 924	 */
 925	if (tried_handoff)
 926		writel(0, op_reg_base + EHCI_CONFIGFLAG);
 927}
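
/*
 * Illustrative sketch: the EHCI legacy-support hand-off reduced to its
 * core, without the DMI/device quirks handled by ehci_bios_handoff()
 * above.  It sets the OS-owned semaphore byte (offset + 3) and polls the
 * BIOS-owned bit.  The helper name and the one-second timeout are
 * illustrative assumptions.
 */
static int example_ehci_claim(struct pci_dev *pdev, u8 legsup_offset)
{
	u32 cap;
	int msec = 1000;

	pci_write_config_byte(pdev, legsup_offset + 3, 1);
	do {
		pci_read_config_dword(pdev, legsup_offset, &cap);
		if (!(cap & EHCI_USBLEGSUP_BIOS))
			return 0;	/* BIOS released the controller */
		msleep(10);
		msec -= 10;
	} while (msec > 0);

	return -ETIMEDOUT;
}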
 928
 929static void quirk_usb_disable_ehci(struct pci_dev *pdev)
 930{
 931	void __iomem *base, *op_reg_base;
 932	u32	hcc_params, cap, val;
 933	u8	offset, cap_length;
 934	int	wait_time, count = 256/4;
 935
 936	if (!mmio_resource_enabled(pdev, 0))
 937		return;
 938
 939	base = pci_ioremap_bar(pdev, 0);
 940	if (base == NULL)
 941		return;
 942
 943	cap_length = readb(base);
 944	op_reg_base = base + cap_length;
 945
  946	/* EHCI 0.96 and later may have "extended capabilities".
  947	 * Spec section 5.1 explains the BIOS handoff, e.g. for
  948	 * booting from a USB disk or using a USB keyboard.
  949	 */
 950	hcc_params = readl(base + EHCI_HCC_PARAMS);
 951	offset = (hcc_params >> 8) & 0xff;
 952	while (offset && --count) {
 953		pci_read_config_dword(pdev, offset, &cap);
 954
 955		switch (cap & 0xff) {
 956		case 1:
 957			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
 958			break;
 959		case 0: /* Illegal reserved cap, set cap=0 so we exit */
 960			cap = 0;
 961			fallthrough;
 962		default:
 963			dev_warn(&pdev->dev,
 964				 "EHCI: unrecognized capability %02x\n",
 965				 cap & 0xff);
 966		}
 967		offset = (cap >> 8) & 0xff;
 968	}
 969	if (!count)
 970		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
 971
 972	/*
 973	 * halt EHCI & disable its interrupts in any case
 974	 */
 975	val = readl(op_reg_base + EHCI_USBSTS);
 976	if ((val & EHCI_USBSTS_HALTED) == 0) {
 977		val = readl(op_reg_base + EHCI_USBCMD);
 978		val &= ~EHCI_USBCMD_RUN;
 979		writel(val, op_reg_base + EHCI_USBCMD);
 980
 981		wait_time = 2000;
 982		do {
 983			writel(0x3f, op_reg_base + EHCI_USBSTS);
 984			udelay(100);
 985			wait_time -= 100;
 986			val = readl(op_reg_base + EHCI_USBSTS);
 987			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
 988				break;
 989			}
 990		} while (wait_time > 0);
 991	}
 992	writel(0, op_reg_base + EHCI_USBINTR);
 993	writel(0x3f, op_reg_base + EHCI_USBSTS);
 994
 995	iounmap(base);
 996}
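
/*
 * Illustrative sketch: the extended-capability walk used by
 * quirk_usb_disable_ehci() in isolation.  HCCPARAMS bits 15:8 give the
 * PCI config-space offset of the first capability; bits 15:8 of each
 * capability point to the next one, and 0 ends the list.  The callback
 * signature and helper name are hypothetical.
 */
static void example_ehci_for_each_cap(struct pci_dev *pdev,
				      void __iomem *base,
				      void (*fn)(struct pci_dev *, u32, u8))
{
	u32 hcc_params = readl(base + EHCI_HCC_PARAMS);
	u8 offset = (hcc_params >> 8) & 0xff;
	int guard = 256 / 4;	/* bound the walk, as in the quirk above */
	u32 cap;

	while (offset && --guard) {
		pci_read_config_dword(pdev, offset, &cap);
		fn(pdev, cap, offset);
		offset = (cap >> 8) & 0xff;
	}
}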
 997
 998/*
 999 * handshake - spin reading a register until handshake completes
1000 * @ptr: address of hc register to be read
1001 * @mask: bits to look at in result of read
1002 * @done: value of those bits when handshake succeeds
1003 * @wait_usec: timeout in microseconds
1004 * @delay_usec: delay in microseconds to wait between polling
1005 *
1006 * Polls a register every delay_usec microseconds.
1007 * Returns 0 when the mask bits have the value done.
1008 * Returns -ETIMEDOUT if this condition is not true after
1009 * wait_usec microseconds have passed.
1010 */
1011static int handshake(void __iomem *ptr, u32 mask, u32 done,
1012		int wait_usec, int delay_usec)
1013{
1014	u32	result;
1015
1016	return readl_poll_timeout_atomic(ptr, result,
1017					 ((result & mask) == done),
 1018					 delay_usec, wait_usec);
 1019}
1020
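/*
 * Illustrative sketch: roughly what handshake() expands to.  An open-coded
 * polling loop equivalent in spirit to readl_poll_timeout_atomic(), shown
 * only to make the timeout and polling-interval semantics explicit; the
 * helper itself is hypothetical.
 */
static int example_handshake_open_coded(void __iomem *ptr, u32 mask, u32 done,
					int wait_usec, int delay_usec)
{
	u32 result;

	while (wait_usec > 0) {
		result = readl(ptr);
		if ((result & mask) == done)
			return 0;
		udelay(delay_usec);
		wait_usec -= delay_usec;
	}
	return -ETIMEDOUT;
}
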
1021/*
1022 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
1023 * share some number of ports.  These ports can be switched between either
1024 * controller.  Not all of the ports under the EHCI host controller may be
1025 * switchable.
1026 *
1027 * The ports should be switched over to xHCI before PCI probes for any device
1028 * start.  This avoids active devices under EHCI being disconnected during the
1029 * port switchover, which could cause loss of data on USB storage devices, or
1030 * failed boot when the root file system is on a USB mass storage device and is
1031 * enumerated under EHCI first.
1032 *
1033 * We write into the xHC's PCI configuration space in some Intel-specific
1034 * registers to switch the ports over.  The USB 3.0 terminations and the USB
1035 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
1036 * terminations before switching the USB 2.0 wires over, so that USB 3.0
1037 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
1038 */
1039void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
1040{
1041	u32		ports_available;
1042	bool		ehci_found = false;
1043	struct pci_dev	*companion = NULL;
1044
1045	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
1046	 * switching ports from EHCI to xHCI
1047	 */
1048	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
1049	    xhci_pdev->subsystem_device == 0x90a8)
1050		return;
1051
1052	/* make sure an intel EHCI controller exists */
1053	for_each_pci_dev(companion) {
1054		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
1055		    companion->vendor == PCI_VENDOR_ID_INTEL) {
1056			ehci_found = true;
1057			break;
1058		}
1059	}
1060
1061	if (!ehci_found)
1062		return;
1063
 1064	/* Don't switch over the ports if the user hasn't compiled the xHCI
1065	 * driver.  Otherwise they will see "dead" USB ports that don't power
1066	 * the devices.
1067	 */
1068	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
1069		dev_warn(&xhci_pdev->dev,
1070			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
1071		dev_warn(&xhci_pdev->dev,
1072				"USB 3.0 devices will work at USB 2.0 speeds.\n");
1073		usb_disable_xhci_ports(xhci_pdev);
1074		return;
1075	}
1076
 1077	/* Read USB3PRM, the USB 3.0 Port Routing Mask register.
 1078	 * It indicates which ports the OS is allowed to switch over.
 1079	 */
1080	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
1081			&ports_available);
1082
1083	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
1084			ports_available);
1085
1086	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
1087	 * Register, to turn on SuperSpeed terminations for the
1088	 * switchable ports.
1089	 */
1090	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
1091			ports_available);
1092
1093	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
1094			&ports_available);
1095	dev_dbg(&xhci_pdev->dev,
1096		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
1097		ports_available);
1098
 1099	/* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask register.
 1100	 * It indicates which USB 2.0 ports can be handed to the xHCI host.
 1101	 */
1102
1103	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
1104			&ports_available);
1105
 1106	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
1107			ports_available);
1108
1109	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
1110	 * switch the USB 2.0 power and data lines over to the xHCI
1111	 * host.
1112	 */
1113	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
1114			ports_available);
1115
1116	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
1117			&ports_available);
1118	dev_dbg(&xhci_pdev->dev,
1119		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
1120		ports_available);
1121}
1122EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
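
/*
 * Illustrative sketch: the Intel port-switch pattern used above, in one
 * hypothetical helper.  Each *PRM register is a mask of the ports the OS
 * is allowed to switch; writing that mask to the paired control register
 * hands those ports to the xHCI controller.
 */
static void example_intel_switch_ports(struct pci_dev *xhci_pdev,
				       int prm_reg, int ctrl_reg)
{
	u32 mask;

	pci_read_config_dword(xhci_pdev, prm_reg, &mask);  /* switchable ports */
	pci_write_config_dword(xhci_pdev, ctrl_reg, mask); /* switch them */
}

/*
 * Usage would mirror usb_enable_intel_xhci_ports(): SuperSpeed
 * terminations first, then the USB 2.0 wires:
 *
 *	example_intel_switch_ports(pdev, USB_INTEL_USB3PRM, USB_INTEL_USB3_PSSEN);
 *	example_intel_switch_ports(pdev, USB_INTEL_USB2PRM, USB_INTEL_XUSB2PR);
 */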
1123
1124void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
1125{
1126	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
1127	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
1128}
1129EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
1130
1131/*
1132 * PCI Quirks for xHCI.
1133 *
1134 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
1135 * It signals to the BIOS that the OS wants control of the host controller,
1136 * and then waits 1 second for the BIOS to hand over control.
 1137 * If we time out, assume the BIOS is broken and take control anyway.
1138 */
1139static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
1140{
1141	void __iomem *base;
1142	int ext_cap_offset;
1143	void __iomem *op_reg_base;
1144	u32 val;
1145	int timeout;
1146	int len = pci_resource_len(pdev, 0);
1147
1148	if (!mmio_resource_enabled(pdev, 0))
1149		return;
1150
1151	base = ioremap(pci_resource_start(pdev, 0), len);
1152	if (base == NULL)
1153		return;
1154
1155	/*
1156	 * Find the Legacy Support Capability register -
1157	 * this is optional for xHCI host controllers.
1158	 */
1159	ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);
1160
1161	if (!ext_cap_offset)
1162		goto hc_init;
1163
1164	if ((ext_cap_offset + sizeof(val)) > len) {
1165		/* We're reading garbage from the controller */
 1166		dev_warn(&pdev->dev, "xHCI controller failing to respond\n");
1167		goto iounmap;
1168	}
1169	val = readl(base + ext_cap_offset);
1170
1171	/* Auto handoff never worked for these devices. Force it and continue */
1172	if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
1173			(pdev->vendor == PCI_VENDOR_ID_RENESAS
1174			 && pdev->device == 0x0014)) {
1175		val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
1176		writel(val, base + ext_cap_offset);
1177	}
1178
1179	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
1180	if (val & XHCI_HC_BIOS_OWNED) {
1181		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
1182
1183		/* Wait for 1 second with 10 microsecond polling interval */
1184		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
1185				0, 1000000, 10);
1186
1187		/* Assume a buggy BIOS and take HC ownership anyway */
1188		if (timeout) {
1189			dev_warn(&pdev->dev,
1190				 "xHCI BIOS handoff failed (BIOS bug ?) %08x\n",
1191				 val);
1192			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
1193		}
1194	}
1195
1196	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1197	/* Mask off (turn off) any enabled SMIs */
1198	val &= XHCI_LEGACY_DISABLE_SMI;
 1199	/* Set all SMI event bits (RW1C) so the write below clears them */
 1200	val |= XHCI_LEGACY_SMI_EVENTS;
 1201	/* Disable any BIOS SMIs and clear all SMI events */
1202	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1203
1204hc_init:
1205	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
1206		usb_enable_intel_xhci_ports(pdev);
1207
1208	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
1209
1210	/* Wait for the host controller to be ready before writing any
1211	 * operational or runtime registers.  Wait 5 seconds and no more.
1212	 */
1213	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
1214			5000000, 10);
1215	/* Assume a buggy HC and start HC initialization anyway */
1216	if (timeout) {
1217		val = readl(op_reg_base + XHCI_STS_OFFSET);
1218		dev_warn(&pdev->dev,
1219			 "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
1220			 val);
1221	}
1222
1223	/* Send the halt and disable interrupts command */
1224	val = readl(op_reg_base + XHCI_CMD_OFFSET);
1225	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
1226	writel(val, op_reg_base + XHCI_CMD_OFFSET);
1227
1228	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
1229	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
1230			XHCI_MAX_HALT_USEC, 125);
1231	if (timeout) {
1232		val = readl(op_reg_base + XHCI_STS_OFFSET);
1233		dev_warn(&pdev->dev,
1234			 "xHCI HW did not halt within %d usec status = 0x%x\n",
1235			 XHCI_MAX_HALT_USEC, val);
1236	}
1237
1238iounmap:
1239	iounmap(base);
1240}
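
/*
 * Illustrative sketch: the xHCI ownership claim at the heart of
 * quirk_usb_handoff_xhci(), without the vendor quirks or the later halt
 * sequence.  It sets the OS-owned semaphore in the USB Legacy Support
 * extended capability and waits up to one second for the BIOS-owned bit
 * to clear.  The helper name is hypothetical.
 */
static int example_xhci_claim(void __iomem *base)
{
	int ext_cap_offset;
	u32 val;

	ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);
	if (!ext_cap_offset)
		return 0;	/* capability is optional: nothing to hand off */

	val = readl(base + ext_cap_offset);
	writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);

	/* poll the BIOS-owned semaphore: 10 us interval, 1 s timeout */
	return handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, 0,
			 1000000, 10);
}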
1241
1242static void quirk_usb_early_handoff(struct pci_dev *pdev)
1243{
1244	struct device_node *parent;
1245	bool is_rpi;
1246
 1247	/* Skip the Netlogic MIPS SoC's internal PCI USB controller.
 1248	 * This device does not need/support EHCI/OHCI handoff.
 1249	 */
1250	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
1251		return;
1252
1253	/*
 1254	 * Bypass the Raspberry Pi 4's xHCI controller; the handoff is
 1255	 * taken care of by the board's co-processor.
1256	 */
1257	if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
1258		parent = of_get_parent(pdev->bus->dev.of_node);
1259		is_rpi = of_device_is_compatible(parent, "brcm,bcm2711-pcie");
1260		of_node_put(parent);
1261		if (is_rpi)
1262			return;
1263	}
1264
1265	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
1266			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
1267			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
1268			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
1269		return;
1270
1271	if (pci_enable_device(pdev) < 0) {
1272		dev_warn(&pdev->dev,
1273			 "Can't enable PCI device, BIOS handoff failed.\n");
1274		return;
1275	}
1276	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
1277		quirk_usb_handoff_uhci(pdev);
1278	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
1279		quirk_usb_handoff_ohci(pdev);
1280	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
1281		quirk_usb_disable_ehci(pdev);
1282	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
1283		quirk_usb_handoff_xhci(pdev);
1284	pci_disable_device(pdev);
1285}
1286DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1287			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
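
/*
 * Illustrative sketch: the fixup-registration pattern used above.  The
 * (PCI_CLASS_SERIAL_USB, 8) pair matches any USB host controller
 * regardless of its programming interface; passing a full class value
 * with a shift of 0 would match a single interface only.  The hook below
 * is hypothetical and only shows the shape of such a registration.
 */
static void example_xhci_only_fixup(struct pci_dev *pdev)
{
	dev_dbg(&pdev->dev, "example xHCI-only final fixup ran\n");
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_SERIAL_USB_XHCI, 0, example_xhci_only_fixup);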