   1/*
   2 * xHCI host controller driver
   3 *
   4 * Copyright (C) 2008 Intel Corp.
   5 *
   6 * Author: Sarah Sharp
   7 * Some code borrowed from the Linux EHCI driver.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  15 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  16 * for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software Foundation,
  20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21 */
  22
  23#ifndef __LINUX_XHCI_HCD_H
  24#define __LINUX_XHCI_HCD_H
  25
  26#include <linux/usb.h>
  27#include <linux/timer.h>
  28#include <linux/kernel.h>
  29#include <linux/usb/hcd.h>
  30
  31/* Code sharing between pci-quirks and xhci hcd */
  32#include	"xhci-ext-caps.h"
  33#include "pci-quirks.h"
  34
  35/* xHCI PCI Configuration Registers */
  36#define XHCI_SBRN_OFFSET	(0x60)
  37
  38/* Max number of USB devices for any host controller - limit in section 6.1 */
  39#define MAX_HC_SLOTS		256
  40/* Section 5.3.3 - MaxPorts */
  41#define MAX_HC_PORTS		127
  42
  43/*
  44 * xHCI register interface.
  45 * This corresponds to the eXtensible Host Controller Interface (xHCI)
  46 * Revision 0.95 specification
  47 */
  48
  49/**
  50 * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
  51 * @hc_capbase:		length of the capabilities register and HC version number
  52 * @hcs_params1:	HCSPARAMS1 - Structural Parameters 1
  53 * @hcs_params2:	HCSPARAMS2 - Structural Parameters 2
  54 * @hcs_params3:	HCSPARAMS3 - Structural Parameters 3
  55 * @hcc_params:		HCCPARAMS - Capability Parameters
  56 * @db_off:		DBOFF - Doorbell array offset
  57 * @run_regs_off:	RTSOFF - Runtime register space offset
  58 */
  59struct xhci_cap_regs {
  60	__le32	hc_capbase;
  61	__le32	hcs_params1;
  62	__le32	hcs_params2;
  63	__le32	hcs_params3;
  64	__le32	hcc_params;
  65	__le32	db_off;
  66	__le32	run_regs_off;
  67	/* Reserved up to (CAPLENGTH - 0x1C) */
  68};
  69
  70/* hc_capbase bitmasks */
  71/* bits 7:0 - how long is the Capabilities register */
  72#define HC_LENGTH(p)		XHCI_HC_LENGTH(p)
  73/* bits 31:16	*/
  74#define HC_VERSION(p)		(((p) >> 16) & 0xffff)
  75
  76/* HCSPARAMS1 - hcs_params1 - bitmasks */
  77/* bits 0:7, Max Device Slots */
  78#define HCS_MAX_SLOTS(p)	(((p) >> 0) & 0xff)
  79#define HCS_SLOTS_MASK		0xff
  80/* bits 8:18, Max Interrupters */
  81#define HCS_MAX_INTRS(p)	(((p) >> 8) & 0x7ff)
  82/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
  83#define HCS_MAX_PORTS(p)	(((p) >> 24) & 0x7f)
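
/*
 * Illustrative sketch (not part of the original header): decoding HCSPARAMS1
 * after reading it from an ioremapped capability register block.  The
 * function name, the "cap_regs" argument and the pr_debug() call are
 * assumptions made for this example only.
 */
static inline void xhci_example_decode_hcsparams1(struct xhci_cap_regs __iomem *cap_regs)
{
	u32 params = readl(&cap_regs->hcs_params1);
	unsigned int slots = HCS_MAX_SLOTS(params);	/* device slots, up to 255 */
	unsigned int intrs = HCS_MAX_INTRS(params);	/* interrupters */
	unsigned int ports = HCS_MAX_PORTS(params);	/* root hub ports, up to 127 */

	pr_debug("xHC: %u slots, %u interrupters, %u ports\n", slots, intrs, ports);
}
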
  84
  85/* HCSPARAMS2 - hcs_params2 - bitmasks */
  86/* bits 0:3, frames or uframes that SW needs to queue transactions
  87 * ahead of the HW to meet periodic deadlines */
  88#define HCS_IST(p)		(((p) >> 0) & 0xf)
  89/* bits 4:7, max number of Event Ring segments */
  90#define HCS_ERST_MAX(p)		(((p) >> 4) & 0xf)
  91/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
  92/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
  93#define HCS_MAX_SCRATCHPAD(p)   (((p) >> 27) & 0x1f)
  94
  95/* HCSPARAMS3 - hcs_params3 - bitmasks */
  96/* bits 0:7, Max U1 to U0 latency for the roothub ports */
  97#define HCS_U1_LATENCY(p)	(((p) >> 0) & 0xff)
  98/* bits 16:31, Max U2 to U0 latency for the roothub ports */
  99#define HCS_U2_LATENCY(p)	(((p) >> 16) & 0xffff)
 100
 101/* HCCPARAMS - hcc_params - bitmasks */
 102/* true: HC can use 64-bit address pointers */
 103#define HCC_64BIT_ADDR(p)	((p) & (1 << 0))
 104/* true: HC can do bandwidth negotiation */
 105#define HCC_BANDWIDTH_NEG(p)	((p) & (1 << 1))
 106/* true: HC uses 64-byte Device Context structures
 107 * FIXME 64-byte context structures aren't supported yet.
 108 */
 109#define HCC_64BYTE_CONTEXT(p)	((p) & (1 << 2))
 110/* true: HC has port power switches */
 111#define HCC_PPC(p)		((p) & (1 << 3))
 112/* true: HC has port indicators */
 113#define HCS_INDICATOR(p)	((p) & (1 << 4))
 114/* true: HC has Light HC Reset Capability */
 115#define HCC_LIGHT_RESET(p)	((p) & (1 << 5))
 116/* true: HC supports latency tolerance messaging */
 117#define HCC_LTC(p)		((p) & (1 << 6))
 118/* true: no secondary Stream ID Support */
 119#define HCC_NSS(p)		((p) & (1 << 7))
 120/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
 121#define HCC_MAX_PSA(p)		(1 << ((((p) >> 12) & 0xf) + 1))
 122/* Extended Capabilities pointer from PCI base - section 5.3.6 */
 123#define HCC_EXT_CAPS(p)		XHCI_HCC_EXT_CAPS(p)
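
/*
 * Illustrative sketch (not part of the original header): the CSZ bit tested
 * by HCC_64BYTE_CONTEXT() decides whether every context structure is 32 or
 * 64 bytes, so drivers typically derive the context size once from a cached
 * HCCPARAMS value.  The helper name is an assumption for this example.
 */
static inline unsigned int xhci_example_ctx_size(u32 hcc_params)
{
	return HCC_64BYTE_CONTEXT(hcc_params) ? 64 : 32;
}
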
 124
 125/* db_off bitmask - bits 0:1 reserved */
 126#define	DBOFF_MASK	(~0x3)
 127
 128/* run_regs_off bitmask - bits 0:4 reserved */
 129#define	RTSOFF_MASK	(~0x1f)
 130
 131
 132/* Number of registers per port */
 133#define	NUM_PORT_REGS	4
 134
 135#define PORTSC		0
 136#define PORTPMSC	1
 137#define PORTLI		2
 138#define PORTHLPMC	3
 139
 140/**
 141 * struct xhci_op_regs - xHCI Host Controller Operational Registers.
 142 * @command:		USBCMD - xHC command register
 143 * @status:		USBSTS - xHC status register
 144 * @page_size:		This indicates the page size that the host controller
 145 * 			supports.  If bit n is set, the HC supports a page size
 146 * 			of 2^(n+12), up to a 128MB page size.
 147 * 			4K is the minimum page size.
 148 * @cmd_ring:		CRP - 64-bit Command Ring Pointer
 149 * @dcbaa_ptr:		DCBAAP - 64-bit Device Context Base Address Array Pointer
 150 * @config_reg:		CONFIG - Configure Register
 151 * @port_status_base:	PORTSCn - base address for Port Status and Control
 152 * 			Each port has a Port Status and Control register,
 153 * 			followed by a Port Power Management Status and Control
 154 * 			register, a Port Link Info register, and a reserved
 155 * 			register.
 156 * @port_power_base:	PORTPMSCn - base address for
 157 * 			Port Power Management Status and Control
 158 * @port_link_base:	PORTLIn - base address for Port Link Info (current
 159 * 			Link PM state and control) for USB 2.1 and USB 3.0
 160 * 			devices.
 161 */
 162struct xhci_op_regs {
 163	__le32	command;
 164	__le32	status;
 165	__le32	page_size;
 166	__le32	reserved1;
 167	__le32	reserved2;
 168	__le32	dev_notification;
 169	__le64	cmd_ring;
 170	/* rsvd: offset 0x20-2F */
 171	__le32	reserved3[4];
 172	__le64	dcbaa_ptr;
 173	__le32	config_reg;
 174	/* rsvd: offset 0x3C-3FF */
 175	__le32	reserved4[241];
 176	/* port 1 registers, which serve as a base address for other ports */
 177	__le32	port_status_base;
 178	__le32	port_power_base;
 179	__le32	port_link_base;
 180	__le32	reserved5;
 181	/* registers for ports 2-255 */
 182	__le32	reserved6[NUM_PORT_REGS*254];
 183};
 184
 185/* USBCMD - USB command - command bitmasks */
 186/* start/stop HC execution - do not write unless HC is halted*/
 187#define CMD_RUN		XHCI_CMD_RUN
 188/* Reset HC - resets internal HC state machine and all registers (except
 189 * PCI config regs).  HC does NOT drive a USB reset on the downstream ports.
 190 * The xHCI driver must reinitialize the xHC after setting this bit.
 191 */
 192#define CMD_RESET	(1 << 1)
 193/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
 194#define CMD_EIE		XHCI_CMD_EIE
 195/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
 196#define CMD_HSEIE	XHCI_CMD_HSEIE
 197/* bits 4:6 are reserved (and should be preserved on writes). */
 198/* light reset (port status stays unchanged) - reset completed when this is 0 */
 199#define CMD_LRESET	(1 << 7)
 200/* host controller save/restore state. */
 201#define CMD_CSS		(1 << 8)
 202#define CMD_CRS		(1 << 9)
 203/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
 204#define CMD_EWE		XHCI_CMD_EWE
 205/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
 206 * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
 207 * '0' means the xHC can power it off if all ports are in the disconnect,
 208 * disabled, or powered-off state.
 209 */
 210#define CMD_PM_INDEX	(1 << 11)
 211/* bits 12:31 are reserved (and should be preserved on writes). */
 212
 213/* IMAN - Interrupt Management Register */
 214#define IMAN_IE		(1 << 1)
 215#define IMAN_IP		(1 << 0)
 216
 217/* USBSTS - USB status - status bitmasks */
 218/* HC not running - set to 1 when run/stop bit is cleared. */
 219#define STS_HALT	XHCI_STS_HALT
 220/* serious error, e.g. PCI parity error.  The HC will clear the run/stop bit. */
 221#define STS_FATAL	(1 << 2)
 222/* event interrupt - clear this prior to clearing any IP flags in IR set*/
 223#define STS_EINT	(1 << 3)
 224/* port change detect */
 225#define STS_PORT	(1 << 4)
 226/* bits 5:7 reserved and zeroed */
 227/* save state status - '1' means xHC is saving state */
 228#define STS_SAVE	(1 << 8)
 229/* restore state status - '1' means xHC is restoring state */
 230#define STS_RESTORE	(1 << 9)
 231/* true: save or restore error */
 232#define STS_SRE		(1 << 10)
 233/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
 234#define STS_CNR		XHCI_STS_CNR
 235/* true: internal Host Controller Error - SW needs to reset and reinitialize */
 236#define STS_HCE		(1 << 12)
 237/* bits 13:31 reserved and should be preserved */
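
/*
 * Illustrative sketch (not part of the original header): the usual "quiesce
 * and halt" sequence expressed with the USBCMD/USBSTS bitmasks above.
 * "op_regs" is assumed to be an ioremapped struct xhci_op_regs; a real driver
 * would bound the wait with a timeout instead of spinning forever.
 */
static inline void xhci_example_halt(struct xhci_op_regs __iomem *op_regs)
{
	u32 cmd = readl(&op_regs->command);

	/* clear run/stop and the interrupt enables */
	cmd &= ~(u32)(CMD_RUN | CMD_EIE | CMD_HSEIE | CMD_EWE);
	writel(cmd, &op_regs->command);

	/* wait for the controller to report Halted in USBSTS */
	while (!(readl(&op_regs->status) & STS_HALT))
		cpu_relax();
}
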
 238
 239/*
 240 * DNCTRL - Device Notification Control Register - dev_notification bitmasks
 241 * Generate a device notification event when the HC sees a transaction with a
 242 * notification type that matches a bit set in this bit field.
 243 */
 244#define	DEV_NOTE_MASK		(0xffff)
 245#define ENABLE_DEV_NOTE(x)	(1 << (x))
 246/* Most of the device notification types should only be used for debug.
 247 * SW does need to pay attention to function wake notifications.
 248 */
 249#define	DEV_NOTE_FWAKE		ENABLE_DEV_NOTE(1)
 250
 251/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
 252/* bit 0 is the command ring cycle state */
 253/* stop ring operation after completion of the currently executing command */
 254#define CMD_RING_PAUSE		(1 << 1)
 255/* stop ring immediately - abort the currently executing command */
 256#define CMD_RING_ABORT		(1 << 2)
 257/* true: command ring is running */
 258#define CMD_RING_RUNNING	(1 << 3)
 259/* bits 4:5 reserved and should be preserved */
 260/* Command Ring pointer - bit mask for the lower 32 bits. */
 261#define CMD_RING_RSVD_BITS	(0x3f)
 262
 263/* CONFIG - Configure Register - config_reg bitmasks */
 264/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
 265#define MAX_DEVS(p)	((p) & 0xff)
 266/* bits 8:31 - reserved and should be preserved */
 267
 268/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
 269/* true: device connected */
 270#define PORT_CONNECT	(1 << 0)
 271/* true: port enabled */
 272#define PORT_PE		(1 << 1)
 273/* bit 2 reserved and zeroed */
 274/* true: port has an over-current condition */
 275#define PORT_OC		(1 << 3)
 276/* true: port reset signaling asserted */
 277#define PORT_RESET	(1 << 4)
 278/* Port Link State - bits 5:8
 279 * A read gives the current link PM state of the port,
 280 * a write with Link State Write Strobe set sets the link state.
 281 */
 282#define PORT_PLS_MASK	(0xf << 5)
 283#define XDEV_U0		(0x0 << 5)
 284#define XDEV_U2		(0x2 << 5)
 285#define XDEV_U3		(0x3 << 5)
 286#define XDEV_RESUME	(0xf << 5)
 287/* true: port has power (see HCC_PPC) */
 288#define PORT_POWER	(1 << 9)
 289/* bits 10:13 indicate device speed:
  290 * 0 - undefined speed - port hasn't been initialized by a reset yet
 291 * 1 - full speed
 292 * 2 - low speed
 293 * 3 - high speed
 294 * 4 - super speed
 295 * 5-15 reserved
 296 */
 297#define DEV_SPEED_MASK		(0xf << 10)
 298#define	XDEV_FS			(0x1 << 10)
 299#define	XDEV_LS			(0x2 << 10)
 300#define	XDEV_HS			(0x3 << 10)
 301#define	XDEV_SS			(0x4 << 10)
 302#define DEV_UNDEFSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x0<<10))
 303#define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_FS)
 304#define DEV_LOWSPEED(p)		(((p) & DEV_SPEED_MASK) == XDEV_LS)
 305#define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_HS)
 306#define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_SS)
 307/* Bits 20:23 in the Slot Context are the speed for the device */
 308#define	SLOT_SPEED_FS		(XDEV_FS << 10)
 309#define	SLOT_SPEED_LS		(XDEV_LS << 10)
 310#define	SLOT_SPEED_HS		(XDEV_HS << 10)
 311#define	SLOT_SPEED_SS		(XDEV_SS << 10)
 312/* Port Indicator Control */
 313#define PORT_LED_OFF	(0 << 14)
 314#define PORT_LED_AMBER	(1 << 14)
 315#define PORT_LED_GREEN	(2 << 14)
 316#define PORT_LED_MASK	(3 << 14)
 317/* Port Link State Write Strobe - set this when changing link state */
 318#define PORT_LINK_STROBE	(1 << 16)
 319/* true: connect status change */
 320#define PORT_CSC	(1 << 17)
 321/* true: port enable change */
 322#define PORT_PEC	(1 << 18)
 323/* true: warm reset for a USB 3.0 device is done.  A "hot" reset puts the port
 324 * into an enabled state, and the device into the default state.  A "warm" reset
 325 * also resets the link, forcing the device through the link training sequence.
 326 * SW can also look at the Port Reset register to see when warm reset is done.
 327 */
 328#define PORT_WRC	(1 << 19)
 329/* true: over-current change */
 330#define PORT_OCC	(1 << 20)
 331/* true: reset change - 1 to 0 transition of PORT_RESET */
 332#define PORT_RC		(1 << 21)
 333/* port link status change - set on some port link state transitions:
 334 *  Transition				Reason
 335 *  ------------------------------------------------------------------------------
 336 *  - U3 to Resume			Wakeup signaling from a device
 337 *  - Resume to Recovery to U0		USB 3.0 device resume
 338 *  - Resume to U0			USB 2.0 device resume
 339 *  - U3 to Recovery to U0		Software resume of USB 3.0 device complete
 340 *  - U3 to U0				Software resume of USB 2.0 device complete
 341 *  - U2 to U0				L1 resume of USB 2.1 device complete
 342 *  - U0 to U0 (???)			L1 entry rejection by USB 2.1 device
 343 *  - U0 to disabled			L1 entry error with USB 2.1 device
 344 *  - Any state to inactive		Error on USB 3.0 port
 345 */
 346#define PORT_PLC	(1 << 22)
 347/* port configure error change - port failed to configure its link partner */
 348#define PORT_CEC	(1 << 23)
 349/* Cold Attach Status - xHC can set this bit to report device attached during
  350 * Sx state. Warm port reset should be performed to clear this bit and move port
 351 * to connected state.
 352 */
 353#define PORT_CAS	(1 << 24)
 354/* wake on connect (enable) */
 355#define PORT_WKCONN_E	(1 << 25)
 356/* wake on disconnect (enable) */
 357#define PORT_WKDISC_E	(1 << 26)
 358/* wake on over-current (enable) */
 359#define PORT_WKOC_E	(1 << 27)
 360/* bits 28:29 reserved */
 361/* true: device is removable - for USB 3.0 roothub emulation */
 362#define PORT_DEV_REMOVE	(1 << 30)
 363/* Initiate a warm port reset - complete when PORT_WRC is '1' */
 364#define PORT_WR		(1 << 31)
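
/*
 * Illustrative sketch (not part of the original header): reading one PORTSC
 * register and decoding it with the masks above.  "addr" is an assumed
 * pointer to the port's PORTSC register.
 */
static inline void xhci_example_check_port(__le32 __iomem *addr)
{
	u32 portsc = readl(addr);

	if ((portsc & PORT_CONNECT) && DEV_SUPERSPEED(portsc))
		pr_debug("SuperSpeed device connected, link state %#x\n",
			 (portsc & PORT_PLS_MASK) >> 5);
	/*
	 * The change bits (PORT_CSC, PORT_PEC, ...) are write-1-to-clear, so
	 * a real driver masks PORTSC down to a "neutral" value before writing
	 * any of them back, to avoid clearing other RW1C bits by accident.
	 */
}
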
 365
 366/* We mark duplicate entries with -1 */
 367#define DUPLICATE_ENTRY ((u8)(-1))
 368
 369/* Port Power Management Status and Control - port_power_base bitmasks */
 370/* Inactivity timer value for transitions into U1, in microseconds.
 371 * Timeout can be up to 127us.  0xFF means an infinite timeout.
 372 */
 373#define PORT_U1_TIMEOUT(p)	((p) & 0xff)
 374#define PORT_U1_TIMEOUT_MASK	0xff
 375/* Inactivity timer value for transitions into U2 */
 376#define PORT_U2_TIMEOUT(p)	(((p) & 0xff) << 8)
 377#define PORT_U2_TIMEOUT_MASK	(0xff << 8)
 378/* Bits 24:31 for port testing */
 379
 380/* USB2 Protocol PORTSPMSC */
 381#define	PORT_L1S_MASK		7
 382#define	PORT_L1S_SUCCESS	1
 383#define	PORT_RWE		(1 << 3)
 384#define	PORT_HIRD(p)		(((p) & 0xf) << 4)
 385#define	PORT_HIRD_MASK		(0xf << 4)
 386#define	PORT_L1DS_MASK		(0xff << 8)
 387#define	PORT_L1DS(p)		(((p) & 0xff) << 8)
 388#define	PORT_HLE		(1 << 16)
 389
 390
 391/* USB2 Protocol PORTHLPMC */
  392#define PORT_HIRDM(p)		((p) & 3)
  393#define PORT_L1_TIMEOUT(p)	(((p) & 0xff) << 2)
  394#define PORT_BESLD(p)		(((p) & 0xf) << 10)
 395
 396/* use 512 microseconds as USB2 LPM L1 default timeout. */
 397#define XHCI_L1_TIMEOUT		512
 398
 399/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
 400 * Safe to use with mixed HIRD and BESL systems (host and device) and is used
 401 * by other operating systems.
 402 *
 403 * XHCI 1.0 errata 8/14/12 Table 13 notes:
 404 * "Software should choose xHC BESL/BESLD field values that do not violate a
 405 * device's resume latency requirements,
 406 * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
  407 * or not program values < '4' if BLC = '0' and a BESL device is attached."
 408 */
 409#define XHCI_DEFAULT_BESL	4
 410
 411/**
 412 * struct xhci_intr_reg - Interrupt Register Set
 413 * @irq_pending:	IMAN - Interrupt Management Register.  Used to enable
 414 *			interrupts and check for pending interrupts.
 415 * @irq_control:	IMOD - Interrupt Moderation Register.
 416 * 			Used to throttle interrupts.
 417 * @erst_size:		Number of segments in the Event Ring Segment Table (ERST).
 418 * @erst_base:		ERST base address.
 419 * @erst_dequeue:	Event ring dequeue pointer.
 420 *
 421 * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
 422 * Ring Segment Table (ERST) associated with it.  The event ring is comprised of
 423 * multiple segments of the same size.  The HC places events on the ring and
 424 * "updates the Cycle bit in the TRBs to indicate to software the current
 425 * position of the Enqueue Pointer." The HCD (Linux) processes those events and
 426 * updates the dequeue pointer.
 427 */
 428struct xhci_intr_reg {
 429	__le32	irq_pending;
 430	__le32	irq_control;
 431	__le32	erst_size;
 432	__le32	rsvd;
 433	__le64	erst_base;
 434	__le64	erst_dequeue;
 435};
 436
 437/* irq_pending bitmasks */
 438#define	ER_IRQ_PENDING(p)	((p) & 0x1)
 439/* bits 2:31 need to be preserved */
 440/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
 441#define	ER_IRQ_CLEAR(p)		((p) & 0xfffffffe)
 442#define	ER_IRQ_ENABLE(p)	((ER_IRQ_CLEAR(p)) | 0x2)
 443#define	ER_IRQ_DISABLE(p)	((ER_IRQ_CLEAR(p)) & ~(0x2))
 444
 445/* irq_control bitmasks */
 446/* Minimum interval between interrupts (in 250ns intervals).  The interval
 447 * between interrupts will be longer if there are no events on the event ring.
 448 * Default is 4000 (1 ms).
 449 */
 450#define ER_IRQ_INTERVAL_MASK	(0xffff)
 451/* Counter used to count down the time to the next interrupt - HW use only */
 452#define ER_IRQ_COUNTER_MASK	(0xffff << 16)
 453
 454/* erst_size bitmasks */
 455/* Preserve bits 16:31 of erst_size */
 456#define	ERST_SIZE_MASK		(0xffff << 16)
 457
 458/* erst_dequeue bitmasks */
 459/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
 460 * where the current dequeue pointer lies.  This is an optional HW hint.
 461 */
 462#define ERST_DESI_MASK		(0x7)
 463/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
 464 * a work queue (or delayed service routine)?
 465 */
 466#define ERST_EHB		(1 << 3)
 467#define ERST_PTR_MASK		(0xf)
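
/*
 * Illustrative sketch (not part of the original header): composing the value
 * written back to erst_dequeue after handling events.  "deq" is the DMA
 * address of the next event TRB to handle (an assumption for this example);
 * the low four bits of the current register value are preserved and EHB is
 * written as 1 to clear it (it is write-1-to-clear).
 */
static inline u64 xhci_example_new_erst_dequeue(u64 current_reg, dma_addr_t deq)
{
	u64 val = current_reg & ERST_PTR_MASK;	/* keep DESI + EHB bits */

	val |= (u64)deq & ~(u64)ERST_PTR_MASK;	/* new dequeue pointer, bits 63:4 */
	val |= ERST_EHB;			/* clear Event Handler Busy */
	return val;
}
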
 468
 469/**
 470 * struct xhci_run_regs
 471 * @microframe_index:
 472 * 		MFINDEX - current microframe number
 473 *
 474 * Section 5.5 Host Controller Runtime Registers:
 475 * "Software should read and write these registers using only Dword (32 bit)
 476 * or larger accesses"
 477 */
 478struct xhci_run_regs {
 479	__le32			microframe_index;
 480	__le32			rsvd[7];
 481	struct xhci_intr_reg	ir_set[128];
 482};
 483
 484/**
 485 * struct doorbell_array
 486 *
 487 * Bits  0 -  7: Endpoint target
 488 * Bits  8 - 15: RsvdZ
 489 * Bits 16 - 31: Stream ID
 490 *
 491 * Section 5.6
 492 */
 493struct xhci_doorbell_array {
 494	__le32	doorbell[256];
 495};
 496
 497#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))
 498#define DB_VALUE_HOST		0x00000000
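
/*
 * Illustrative sketch (not part of the original header): ringing the doorbell
 * for one endpoint after queueing TRBs.  "dba", "slot_id", "ep_index" and
 * "stream_id" are assumptions for the example; stream_id is 0 for endpoints
 * without streams, and doorbell 0 (DB_VALUE_HOST) belongs to the command ring.
 */
static inline void xhci_example_ring_ep_doorbell(struct xhci_doorbell_array __iomem *dba,
						 unsigned int slot_id,
						 unsigned int ep_index,
						 unsigned int stream_id)
{
	writel(DB_VALUE(ep_index, stream_id), &dba->doorbell[slot_id]);
}
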
 499
 500/**
 501 * struct xhci_protocol_caps
 502 * @revision:		major revision, minor revision, capability ID,
 503 *			and next capability pointer.
 504 * @name_string:	Four ASCII characters to say which spec this xHC
 505 *			follows, typically "USB ".
 506 * @port_info:		Port offset, count, and protocol-defined information.
 507 */
 508struct xhci_protocol_caps {
 509	u32	revision;
 510	u32	name_string;
 511	u32	port_info;
 512};
 513
 514#define	XHCI_EXT_PORT_MAJOR(x)	(((x) >> 24) & 0xff)
 515#define	XHCI_EXT_PORT_OFF(x)	((x) & 0xff)
 516#define	XHCI_EXT_PORT_COUNT(x)	(((x) >> 8) & 0xff)
 517
 518/**
 519 * struct xhci_container_ctx
  520 * @type: Type of context.  Used to calculate offsets to contained contexts.
 521 * @size: Size of the context data
 522 * @bytes: The raw context data given to HW
 523 * @dma: dma address of the bytes
 524 *
 525 * Represents either a Device or Input context.  Holds a pointer to the raw
 526 * memory used for the context (bytes) and dma address of it (dma).
 527 */
 528struct xhci_container_ctx {
 529	unsigned type;
 530#define XHCI_CTX_TYPE_DEVICE  0x1
 531#define XHCI_CTX_TYPE_INPUT   0x2
 532
 533	int size;
 534
 535	u8 *bytes;
 536	dma_addr_t dma;
 537};
 538
 539/**
 540 * struct xhci_slot_ctx
 541 * @dev_info:	Route string, device speed, hub info, and last valid endpoint
 542 * @dev_info2:	Max exit latency for device number, root hub port number
 543 * @tt_info:	tt_info is used to construct split transaction tokens
 544 * @dev_state:	slot state and device address
 545 *
 546 * Slot Context - section 6.2.1.1.  This assumes the HC uses 32-byte context
 547 * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
 548 * reserved at the end of the slot context for HC internal use.
 549 */
 550struct xhci_slot_ctx {
 551	__le32	dev_info;
 552	__le32	dev_info2;
 553	__le32	tt_info;
 554	__le32	dev_state;
 555	/* offset 0x10 to 0x1f reserved for HC internal use */
 556	__le32	reserved[4];
 557};
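
/*
 * Illustrative sketch (not part of the original header): how the @type field
 * of struct xhci_container_ctx is used to locate the slot context.  In an
 * input context the input control context comes first, so the slot context
 * sits one context-size further in.  "ctx_size" (32 or 64 bytes, see
 * HCC_64BYTE_CONTEXT) is passed in here as an assumption for the example.
 */
static inline struct xhci_slot_ctx *
xhci_example_get_slot_ctx(struct xhci_container_ctx *ctx, unsigned int ctx_size)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;
	/* XHCI_CTX_TYPE_INPUT: skip the input control context */
	return (struct xhci_slot_ctx *)(ctx->bytes + ctx_size);
}
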
 558
 559/* dev_info bitmasks */
 560/* Route String - 0:19 */
 561#define ROUTE_STRING_MASK	(0xfffff)
 562/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
 563#define DEV_SPEED	(0xf << 20)
 564/* bit 24 reserved */
 565/* Is this LS/FS device connected through a HS hub? - bit 25 */
 566#define DEV_MTT		(0x1 << 25)
 567/* Set if the device is a hub - bit 26 */
 568#define DEV_HUB		(0x1 << 26)
 569/* Index of the last valid endpoint context in this device context - 27:31 */
 570#define LAST_CTX_MASK	(0x1f << 27)
 571#define LAST_CTX(p)	((p) << 27)
 572#define LAST_CTX_TO_EP_NUM(p)	(((p) >> 27) - 1)
 573#define SLOT_FLAG	(1 << 0)
 574#define EP0_FLAG	(1 << 1)
 575
 576/* dev_info2 bitmasks */
 577/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
 578#define MAX_EXIT	(0xffff)
 579/* Root hub port number that is needed to access the USB device */
 580#define ROOT_HUB_PORT(p)	(((p) & 0xff) << 16)
 581#define DEVINFO_TO_ROOT_HUB_PORT(p)	(((p) >> 16) & 0xff)
 582/* Maximum number of ports under a hub device */
 583#define XHCI_MAX_PORTS(p)	(((p) & 0xff) << 24)
 584
 585/* tt_info bitmasks */
 586/*
 587 * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
 588 * The Slot ID of the hub that isolates the high speed signaling from
 589 * this low or full-speed device.  '0' if attached to root hub port.
 590 */
 591#define TT_SLOT		(0xff)
 592/*
 593 * The number of the downstream facing port of the high-speed hub
 594 * '0' if the device is not low or full speed.
 595 */
 596#define TT_PORT		(0xff << 8)
 597#define TT_THINK_TIME(p)	(((p) & 0x3) << 16)
 598
 599/* dev_state bitmasks */
 600/* USB device address - assigned by the HC */
 601#define DEV_ADDR_MASK	(0xff)
 602/* bits 8:26 reserved */
 603/* Slot state */
 604#define SLOT_STATE	(0x1f << 27)
 605#define GET_SLOT_STATE(p)	(((p) & (0x1f << 27)) >> 27)
 606
 607#define SLOT_STATE_DISABLED	0
 608#define SLOT_STATE_ENABLED	SLOT_STATE_DISABLED
 609#define SLOT_STATE_DEFAULT	1
 610#define SLOT_STATE_ADDRESSED	2
 611#define SLOT_STATE_CONFIGURED	3
 612
 613/**
 614 * struct xhci_ep_ctx
 615 * @ep_info:	endpoint state, streams, mult, and interval information.
 616 * @ep_info2:	information on endpoint type, max packet size, max burst size,
 617 * 		error count, and whether the HC will force an event for all
 618 * 		transactions.
 619 * @deq:	64-bit ring dequeue pointer address.  If the endpoint only
 620 * 		defines one stream, this points to the endpoint transfer ring.
 621 * 		Otherwise, it points to a stream context array, which has a
 622 * 		ring pointer for each flow.
 623 * @tx_info:
 624 * 		Average TRB lengths for the endpoint ring and
 625 * 		max payload within an Endpoint Service Interval Time (ESIT).
 626 *
 627 * Endpoint Context - section 6.2.1.2.  This assumes the HC uses 32-byte context
 628 * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
 629 * reserved at the end of the endpoint context for HC internal use.
 630 */
 631struct xhci_ep_ctx {
 632	__le32	ep_info;
 633	__le32	ep_info2;
 634	__le64	deq;
 635	__le32	tx_info;
 636	/* offset 0x14 - 0x1f reserved for HC internal use */
 637	__le32	reserved[3];
 638};
 639
 640/* ep_info bitmasks */
 641/*
 642 * Endpoint State - bits 0:2
 643 * 0 - disabled
 644 * 1 - running
 645 * 2 - halted due to halt condition - ok to manipulate endpoint ring
 646 * 3 - stopped
 647 * 4 - TRB error
 648 * 5-7 - reserved
 649 */
 650#define EP_STATE_MASK		(0xf)
 651#define EP_STATE_DISABLED	0
 652#define EP_STATE_RUNNING	1
 653#define EP_STATE_HALTED		2
 654#define EP_STATE_STOPPED	3
 655#define EP_STATE_ERROR		4
  656/* Mult - Max number of bursts within an interval, in EP companion desc. */
 657#define EP_MULT(p)		(((p) & 0x3) << 8)
 658#define CTX_TO_EP_MULT(p)	(((p) >> 8) & 0x3)
 659/* bits 10:14 are Max Primary Streams */
 660/* bit 15 is Linear Stream Array */
 661/* Interval - period between requests to an endpoint - 125u increments. */
 662#define EP_INTERVAL(p)		(((p) & 0xff) << 16)
 663#define EP_INTERVAL_TO_UFRAMES(p)		(1 << (((p) >> 16) & 0xff))
 664#define CTX_TO_EP_INTERVAL(p)	(((p) >> 16) & 0xff)
 665#define EP_MAXPSTREAMS_MASK	(0x1f << 10)
 666#define EP_MAXPSTREAMS(p)	(((p) << 10) & EP_MAXPSTREAMS_MASK)
 667/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
 668#define	EP_HAS_LSA		(1 << 15)
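
/*
 * Illustrative sketch (not part of the original header): the interval stored
 * in ep_info is an exponent, so the service period is 2^interval microframes
 * of 125us each.  The helper name is an assumption for this example.
 */
static inline unsigned int xhci_example_ep_interval_us(__le32 ep_info)
{
	return EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_info)) * 125;
}
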
 669
 670/* ep_info2 bitmasks */
 671/*
 672 * Force Event - generate transfer events for all TRBs for this endpoint
 673 * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
 674 */
 675#define	FORCE_EVENT	(0x1)
 676#define ERROR_COUNT(p)	(((p) & 0x3) << 1)
 677#define CTX_TO_EP_TYPE(p)	(((p) >> 3) & 0x7)
 678#define EP_TYPE(p)	((p) << 3)
 679#define ISOC_OUT_EP	1
 680#define BULK_OUT_EP	2
 681#define INT_OUT_EP	3
 682#define CTRL_EP		4
 683#define ISOC_IN_EP	5
 684#define BULK_IN_EP	6
 685#define INT_IN_EP	7
 686/* bit 6 reserved */
 687/* bit 7 is Host Initiate Disable - for disabling stream selection */
 688#define MAX_BURST(p)	(((p)&0xff) << 8)
 689#define CTX_TO_MAX_BURST(p)	(((p) >> 8) & 0xff)
 690#define MAX_PACKET(p)	(((p)&0xffff) << 16)
 691#define MAX_PACKET_MASK		(0xffff << 16)
 692#define MAX_PACKET_DECODED(p)	(((p) >> 16) & 0xffff)
 693
 694/* Get max packet size from ep desc. Bit 10..0 specify the max packet size.
 695 * USB2.0 spec 9.6.6.
 696 */
 697#define GET_MAX_PACKET(p)	((p) & 0x7ff)
 698
 699/* tx_info bitmasks */
 700#define AVG_TRB_LENGTH_FOR_EP(p)	((p) & 0xffff)
 701#define MAX_ESIT_PAYLOAD_FOR_EP(p)	(((p) & 0xffff) << 16)
 702#define CTX_TO_MAX_ESIT_PAYLOAD(p)	(((p) >> 16) & 0xffff)
 703
 704/* deq bitmasks */
 705#define EP_CTX_CYCLE_MASK		(1 << 0)
 706#define SCTX_DEQ_MASK			(~0xfL)
 707
 708
 709/**
 710 * struct xhci_input_control_context
 711 * Input control context; see section 6.2.5.
 712 *
 713 * @drop_context:	set the bit of the endpoint context you want to disable
 714 * @add_context:	set the bit of the endpoint context you want to enable
 715 */
 716struct xhci_input_control_ctx {
 717	__le32	drop_flags;
 718	__le32	add_flags;
 719	__le32	rsvd2[6];
 720};
 721
 722#define	EP_IS_ADDED(ctrl_ctx, i) \
 723	(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))
 724#define	EP_IS_DROPPED(ctrl_ctx, i)       \
 725	(le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1)))
 726
 727/* Represents everything that is needed to issue a command on the command ring.
 728 * It's useful to pre-allocate these for commands that cannot fail due to
 729 * out-of-memory errors, like freeing streams.
 730 */
 731struct xhci_command {
 732	/* Input context for changing device state */
 733	struct xhci_container_ctx	*in_ctx;
 734	u32				status;
 735	/* If completion is null, no one is waiting on this command
 736	 * and the structure can be freed after the command completes.
 737	 */
 738	struct completion		*completion;
 739	union xhci_trb			*command_trb;
 740	struct list_head		cmd_list;
 741};
 742
 743/* drop context bitmasks */
 744#define	DROP_EP(x)	(0x1 << x)
 745/* add context bitmasks */
 746#define	ADD_EP(x)	(0x1 << x)
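
/*
 * Illustrative sketch (not part of the original header): flagging one
 * endpoint for addition and another for removal before a Configure Endpoint
 * command.  Flag bit 0 is the slot context and bit 1 is EP0, so endpoint
 * index i uses bit i + 1 (see ADD_EP/DROP_EP above).  The function name and
 * parameters are assumptions for this example.
 */
static inline void xhci_example_mark_ep_change(struct xhci_input_control_ctx *ctrl_ctx,
					       unsigned int add_ep_index,
					       unsigned int drop_ep_index)
{
	ctrl_ctx->add_flags |= cpu_to_le32(ADD_EP(add_ep_index + 1) | SLOT_FLAG);
	ctrl_ctx->drop_flags |= cpu_to_le32(DROP_EP(drop_ep_index + 1));
}
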
 747
 748struct xhci_stream_ctx {
 749	/* 64-bit stream ring address, cycle state, and stream type */
 750	__le64	stream_ring;
 751	/* offset 0x14 - 0x1f reserved for HC internal use */
 752	__le32	reserved[2];
 753};
 754
 755/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
 756#define	SCT_FOR_CTX(p)		(((p) & 0x7) << 1)
 757/* Secondary stream array type, dequeue pointer is to a transfer ring */
 758#define	SCT_SEC_TR		0
 759/* Primary stream array type, dequeue pointer is to a transfer ring */
 760#define	SCT_PRI_TR		1
 761/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */
 762#define SCT_SSA_8		2
 763#define SCT_SSA_16		3
 764#define SCT_SSA_32		4
 765#define SCT_SSA_64		5
 766#define SCT_SSA_128		6
 767#define SCT_SSA_256		7
 768
 769/* Assume no secondary streams for now */
 770struct xhci_stream_info {
 771	struct xhci_ring		**stream_rings;
 772	/* Number of streams, including stream 0 (which drivers can't use) */
 773	unsigned int			num_streams;
 774	/* The stream context array may be bigger than
 775	 * the number of streams the driver asked for
 776	 */
 777	struct xhci_stream_ctx		*stream_ctx_array;
 778	unsigned int			num_stream_ctxs;
 779	dma_addr_t			ctx_array_dma;
 780	/* For mapping physical TRB addresses to segments in stream rings */
 781	struct radix_tree_root		trb_address_map;
 782	struct xhci_command		*free_streams_command;
 783};
 784
 785#define	SMALL_STREAM_ARRAY_SIZE		256
 786#define	MEDIUM_STREAM_ARRAY_SIZE	1024
 787
 788/* Some Intel xHCI host controllers need software to keep track of the bus
 789 * bandwidth.  Keep track of endpoint info here.  Each root port is allocated
 790 * the full bus bandwidth.  We must also treat TTs (including each port under a
 791 * multi-TT hub) as a separate bandwidth domain.  The direct memory interface
 792 * (DMI) also limits the total bandwidth (across all domains) that can be used.
 793 */
 794struct xhci_bw_info {
 795	/* ep_interval is zero-based */
 796	unsigned int		ep_interval;
 797	/* mult and num_packets are one-based */
 798	unsigned int		mult;
 799	unsigned int		num_packets;
 800	unsigned int		max_packet_size;
 801	unsigned int		max_esit_payload;
 802	unsigned int		type;
 803};
 804
 805/* "Block" sizes in bytes the hardware uses for different device speeds.
 806 * The logic in this part of the hardware limits the number of bits the hardware
 807 * can use, so must represent bandwidth in a less precise manner to mimic what
 808 * the scheduler hardware computes.
 809 */
 810#define	FS_BLOCK	1
 811#define	HS_BLOCK	4
 812#define	SS_BLOCK	16
 813#define	DMI_BLOCK	32
 814
 815/* Each device speed has a protocol overhead (CRC, bit stuffing, etc) associated
 816 * with each byte transferred.  SuperSpeed devices have an initial overhead to
 817 * set up bursts.  These are in blocks, see above.  LS overhead has already been
 818 * translated into FS blocks.
 819 */
 820#define DMI_OVERHEAD 8
 821#define DMI_OVERHEAD_BURST 4
 822#define SS_OVERHEAD 8
 823#define SS_OVERHEAD_BURST 32
 824#define HS_OVERHEAD 26
 825#define FS_OVERHEAD 20
 826#define LS_OVERHEAD 128
 827/* The TTs need to claim roughly twice as much bandwidth (94 bytes per
 828 * microframe ~= 24Mbps) of the HS bus as the devices can actually use because
 829 * of overhead associated with split transfers crossing microframe boundaries.
 830 * 31 blocks is pure protocol overhead.
 831 */
 832#define TT_HS_OVERHEAD (31 + 94)
 833#define TT_DMI_OVERHEAD (25 + 12)
 834
 835/* Bandwidth limits in blocks */
 836#define FS_BW_LIMIT		1285
 837#define TT_BW_LIMIT		1320
 838#define HS_BW_LIMIT		1607
 839#define SS_BW_LIMIT_IN		3906
 840#define DMI_BW_LIMIT_IN		3906
 841#define SS_BW_LIMIT_OUT		3906
 842#define DMI_BW_LIMIT_OUT	3906
 843
 844/* Percentage of bus bandwidth reserved for non-periodic transfers */
 845#define FS_BW_RESERVED		10
 846#define HS_BW_RESERVED		20
 847#define SS_BW_RESERVED		10
 848
 849struct xhci_virt_ep {
 850	struct xhci_ring		*ring;
 851	/* Related to endpoints that are configured to use stream IDs only */
 852	struct xhci_stream_info		*stream_info;
 853	/* Temporary storage in case the configure endpoint command fails and we
 854	 * have to restore the device state to the previous state
 855	 */
 856	struct xhci_ring		*new_ring;
 857	unsigned int			ep_state;
 858#define SET_DEQ_PENDING		(1 << 0)
 859#define EP_HALTED		(1 << 1)	/* For stall handling */
 860#define EP_HALT_PENDING		(1 << 2)	/* For URB cancellation */
 861/* Transitioning the endpoint to using streams, don't enqueue URBs */
 862#define EP_GETTING_STREAMS	(1 << 3)
 863#define EP_HAS_STREAMS		(1 << 4)
 864/* Transitioning the endpoint to not using streams, don't enqueue URBs */
 865#define EP_GETTING_NO_STREAMS	(1 << 5)
 866	/* ----  Related to URB cancellation ---- */
 867	struct list_head	cancelled_td_list;
 868	struct xhci_td		*stopped_td;
 869	unsigned int		stopped_stream;
 870	/* Watchdog timer for stop endpoint command to cancel URBs */
 871	struct timer_list	stop_cmd_timer;
 872	int			stop_cmds_pending;
 873	struct xhci_hcd		*xhci;
 874	/* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue
 875	 * command.  We'll need to update the ring's dequeue segment and dequeue
 876	 * pointer after the command completes.
 877	 */
 878	struct xhci_segment	*queued_deq_seg;
 879	union xhci_trb		*queued_deq_ptr;
 880	/*
  881	 * Sometimes the xHC cannot process the isochronous endpoint ring quickly
  882	 * enough, so it misses some isoc TDs on the ring and generates
  883	 * a Missed Service Error Event.
  884	 * Set the skip flag when a Missed Service Error Event is received, then
  885	 * process the missed TDs on the endpoint ring.
 886	 */
 887	bool			skip;
 888	/* Bandwidth checking storage */
 889	struct xhci_bw_info	bw_info;
 890	struct list_head	bw_endpoint_list;
 891};
 892
 893enum xhci_overhead_type {
 894	LS_OVERHEAD_TYPE = 0,
 895	FS_OVERHEAD_TYPE,
 896	HS_OVERHEAD_TYPE,
 897};
 898
 899struct xhci_interval_bw {
 900	unsigned int		num_packets;
 901	/* Sorted by max packet size.
 902	 * Head of the list is the greatest max packet size.
 903	 */
 904	struct list_head	endpoints;
 905	/* How many endpoints of each speed are present. */
 906	unsigned int		overhead[3];
 907};
 908
 909#define	XHCI_MAX_INTERVAL	16
 910
 911struct xhci_interval_bw_table {
 912	unsigned int		interval0_esit_payload;
 913	struct xhci_interval_bw	interval_bw[XHCI_MAX_INTERVAL];
 914	/* Includes reserved bandwidth for async endpoints */
 915	unsigned int		bw_used;
 916	unsigned int		ss_bw_in;
 917	unsigned int		ss_bw_out;
 918};
 919
 920
 921struct xhci_virt_device {
 922	struct usb_device		*udev;
 923	/*
 924	 * Commands to the hardware are passed an "input context" that
 925	 * tells the hardware what to change in its data structures.
 926	 * The hardware will return changes in an "output context" that
 927	 * software must allocate for the hardware.  We need to keep
 928	 * track of input and output contexts separately because
 929	 * these commands might fail and we don't trust the hardware.
 930	 */
 931	struct xhci_container_ctx       *out_ctx;
 932	/* Used for addressing devices and configuration changes */
 933	struct xhci_container_ctx       *in_ctx;
 934	/* Rings saved to ensure old alt settings can be re-instated */
 935	struct xhci_ring		**ring_cache;
 936	int				num_rings_cached;
 937#define	XHCI_MAX_RINGS_CACHED	31
 938	struct xhci_virt_ep		eps[31];
 939	struct completion		cmd_completion;
 940	/* Status of the last command issued for this device */
 941	u32				cmd_status;
 942	struct list_head		cmd_list;
 943	u8				fake_port;
 944	u8				real_port;
 945	struct xhci_interval_bw_table	*bw_table;
 946	struct xhci_tt_bw_info		*tt_info;
 947	/* The current max exit latency for the enabled USB3 link states. */
 948	u16				current_mel;
 949};
 950
 951/*
 952 * For each roothub, keep track of the bandwidth information for each periodic
 953 * interval.
 954 *
 955 * If a high speed hub is attached to the roothub, each TT associated with that
 956 * hub is a separate bandwidth domain.  The interval information for the
 957 * endpoints on the devices under that TT will appear in the TT structure.
 958 */
 959struct xhci_root_port_bw_info {
 960	struct list_head		tts;
 961	unsigned int			num_active_tts;
 962	struct xhci_interval_bw_table	bw_table;
 963};
 964
 965struct xhci_tt_bw_info {
 966	struct list_head		tt_list;
 967	int				slot_id;
 968	int				ttport;
 969	struct xhci_interval_bw_table	bw_table;
 970	int				active_eps;
 971};
 972
 973
 974/**
 975 * struct xhci_device_context_array
 976 * @dev_context_ptr	array of 64-bit DMA addresses for device contexts
 977 */
 978struct xhci_device_context_array {
 979	/* 64-bit device addresses; we only write 32-bit addresses */
 980	__le64			dev_context_ptrs[MAX_HC_SLOTS];
 981	/* private xHCD pointers */
 982	dma_addr_t	dma;
 983};
 984/* TODO: write function to set the 64-bit device DMA address */
 985/*
 986 * TODO: change this to be dynamically sized at HC mem init time since the HC
 987 * might not be able to handle the maximum number of devices possible.
 988 */
 989
 990
 991struct xhci_transfer_event {
 992	/* 64-bit buffer address, or immediate data */
 993	__le64	buffer;
 994	__le32	transfer_len;
 995	/* This field is interpreted differently based on the type of TRB */
 996	__le32	flags;
 997};
 998
 999/* Transfer event TRB length bit mask */
1000/* bits 0:23 */
1001#define	EVENT_TRB_LEN(p)		((p) & 0xffffff)
1002
1003/** Transfer Event bit fields **/
1004#define	TRB_TO_EP_ID(p)	(((p) >> 16) & 0x1f)
1005
1006/* Completion Code - only applicable for some types of TRBs */
1007#define	COMP_CODE_MASK		(0xff << 24)
1008#define GET_COMP_CODE(p)	(((p) & COMP_CODE_MASK) >> 24)
1009#define COMP_SUCCESS	1
1010/* Data Buffer Error */
1011#define COMP_DB_ERR	2
1012/* Babble Detected Error */
1013#define COMP_BABBLE	3
1014/* USB Transaction Error */
1015#define COMP_TX_ERR	4
1016/* TRB Error - some TRB field is invalid */
1017#define COMP_TRB_ERR	5
1018/* Stall Error - USB device is stalled */
1019#define COMP_STALL	6
1020/* Resource Error - HC doesn't have memory for that device configuration */
1021#define COMP_ENOMEM	7
1022/* Bandwidth Error - not enough room in schedule for this dev config */
1023#define COMP_BW_ERR	8
1024/* No Slots Available Error - HC ran out of device slots */
1025#define COMP_ENOSLOTS	9
1026/* Invalid Stream Type Error */
1027#define COMP_STREAM_ERR	10
1028/* Slot Not Enabled Error - doorbell rung for disabled device slot */
1029#define COMP_EBADSLT	11
1030/* Endpoint Not Enabled Error */
1031#define COMP_EBADEP	12
1032/* Short Packet */
1033#define COMP_SHORT_TX	13
1034/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
1035#define COMP_UNDERRUN	14
1036/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
1037#define COMP_OVERRUN	15
1038/* Virtual Function Event Ring Full Error */
1039#define COMP_VF_FULL	16
1040/* Parameter Error - Context parameter is invalid */
1041#define COMP_EINVAL	17
1042/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
1043#define COMP_BW_OVER	18
1044/* Context State Error - illegal context state transition requested */
1045#define COMP_CTX_STATE	19
1046/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
1047#define COMP_PING_ERR	20
1048/* Event Ring is full */
1049#define COMP_ER_FULL	21
1050/* Incompatible Device Error */
1051#define COMP_DEV_ERR	22
1052/* Missed Service Error - HC couldn't service an isoc ep within interval */
1053#define COMP_MISSED_INT	23
1054/* Successfully stopped command ring */
1055#define COMP_CMD_STOP	24
1056/* Successfully aborted current command and stopped command ring */
1057#define COMP_CMD_ABORT	25
1058/* Stopped - transfer was terminated by a stop endpoint command */
1059#define COMP_STOP	26
 1060/* Same as COMP_STOP, but the transferred length in the event is invalid */
1061#define COMP_STOP_INVAL	27
1062/* Control Abort Error - Debug Capability - control pipe aborted */
1063#define COMP_DBG_ABORT	28
1064/* Max Exit Latency Too Large Error */
1065#define COMP_MEL_ERR	29
1066/* TRB type 30 reserved */
1067/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
1068#define COMP_BUFF_OVER	31
1069/* Event Lost Error - xHC has an "internal event overrun condition" */
1070#define COMP_ISSUES	32
1071/* Undefined Error - reported when other error codes don't apply */
1072#define COMP_UNKNOWN	33
1073/* Invalid Stream ID Error */
1074#define COMP_STRID_ERR	34
1075/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
1076#define COMP_2ND_BW_ERR	35
1077/* Split Transaction Error */
1078#define	COMP_SPLIT_ERR	36
1079
1080struct xhci_link_trb {
1081	/* 64-bit segment pointer*/
1082	__le64 segment_ptr;
1083	__le32 intr_target;
1084	__le32 control;
1085};
1086
1087/* control bitfields */
1088#define LINK_TOGGLE	(0x1<<1)
1089
1090/* Command completion event TRB */
1091struct xhci_event_cmd {
1092	/* Pointer to command TRB, or the value passed by the event data trb */
1093	__le64 cmd_trb;
1094	__le32 status;
1095	__le32 flags;
1096};
1097
1098/* flags bitmasks */
1099
1100/* Address device - disable SetAddress */
1101#define TRB_BSR		(1<<9)
1102enum xhci_setup_dev {
1103	SETUP_CONTEXT_ONLY,
1104	SETUP_CONTEXT_ADDRESS,
1105};
1106
1107/* bits 16:23 are the virtual function ID */
1108/* bits 24:31 are the slot ID */
1109#define TRB_TO_SLOT_ID(p)	(((p) & (0xff<<24)) >> 24)
1110#define SLOT_ID_FOR_TRB(p)	(((p) & 0xff) << 24)
1111
1112/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
1113#define TRB_TO_EP_INDEX(p)		((((p) & (0x1f << 16)) >> 16) - 1)
1114#define	EP_ID_FOR_TRB(p)		((((p) + 1) & 0x1f) << 16)
1115
1116#define SUSPEND_PORT_FOR_TRB(p)		(((p) & 1) << 23)
1117#define TRB_TO_SUSPEND_PORT(p)		(((p) & (1 << 23)) >> 23)
1118#define LAST_EP_INDEX			30
1119
1120/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
1121#define TRB_TO_STREAM_ID(p)		((((p) & (0xffff << 16)) >> 16))
1122#define STREAM_ID_FOR_TRB(p)		((((p)) & 0xffff) << 16)
1123#define SCT_FOR_TRB(p)			(((p) << 1) & 0x7)
1124
1125
1126/* Port Status Change Event TRB fields */
1127/* Port ID - bits 31:24 */
1128#define GET_PORT_ID(p)		(((p) & (0xff << 24)) >> 24)
1129
1130/* Normal TRB fields */
1131/* transfer_len bitmasks - bits 0:16 */
1132#define	TRB_LEN(p)		((p) & 0x1ffff)
1133/* Interrupter Target - which MSI-X vector to target the completion event at */
1134#define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
1135#define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
1136#define TRB_TBC(p)		(((p) & 0x3) << 7)
1137#define TRB_TLBPC(p)		(((p) & 0xf) << 16)
1138
1139/* Cycle bit - indicates TRB ownership by HC or HCD */
1140#define TRB_CYCLE		(1<<0)
1141/*
1142 * Force next event data TRB to be evaluated before task switch.
1143 * Used to pass OS data back after a TD completes.
1144 */
1145#define TRB_ENT			(1<<1)
1146/* Interrupt on short packet */
1147#define TRB_ISP			(1<<2)
1148/* Set PCIe no snoop attribute */
1149#define TRB_NO_SNOOP		(1<<3)
1150/* Chain multiple TRBs into a TD */
1151#define TRB_CHAIN		(1<<4)
1152/* Interrupt on completion */
1153#define TRB_IOC			(1<<5)
1154/* The buffer pointer contains immediate data */
1155#define TRB_IDT			(1<<6)
1156
1157/* Block Event Interrupt */
1158#define	TRB_BEI			(1<<9)
1159
1160/* Control transfer TRB specific fields */
1161#define TRB_DIR_IN		(1<<16)
1162#define	TRB_TX_TYPE(p)		((p) << 16)
1163#define	TRB_DATA_OUT		2
1164#define	TRB_DATA_IN		3
1165
1166/* Isochronous TRB specific fields */
1167#define TRB_SIA			(1<<31)
1168
1169struct xhci_generic_trb {
1170	__le32 field[4];
1171};
1172
1173union xhci_trb {
1174	struct xhci_link_trb		link;
1175	struct xhci_transfer_event	trans_event;
1176	struct xhci_event_cmd		event_cmd;
1177	struct xhci_generic_trb		generic;
1178};
1179
1180/* TRB bit mask */
1181#define	TRB_TYPE_BITMASK	(0xfc00)
1182#define TRB_TYPE(p)		((p) << 10)
1183#define TRB_FIELD_TO_TYPE(p)	(((p) & TRB_TYPE_BITMASK) >> 10)
1184/* TRB type IDs */
1185/* bulk, interrupt, isoc scatter/gather, and control data stage */
1186#define TRB_NORMAL		1
1187/* setup stage for control transfers */
1188#define TRB_SETUP		2
1189/* data stage for control transfers */
1190#define TRB_DATA		3
1191/* status stage for control transfers */
1192#define TRB_STATUS		4
1193/* isoc transfers */
1194#define TRB_ISOC		5
1195/* TRB for linking ring segments */
1196#define TRB_LINK		6
1197#define TRB_EVENT_DATA		7
1198/* Transfer Ring No-op (not for the command ring) */
1199#define TRB_TR_NOOP		8
1200/* Command TRBs */
1201/* Enable Slot Command */
1202#define TRB_ENABLE_SLOT		9
1203/* Disable Slot Command */
1204#define TRB_DISABLE_SLOT	10
1205/* Address Device Command */
1206#define TRB_ADDR_DEV		11
1207/* Configure Endpoint Command */
1208#define TRB_CONFIG_EP		12
1209/* Evaluate Context Command */
1210#define TRB_EVAL_CONTEXT	13
1211/* Reset Endpoint Command */
1212#define TRB_RESET_EP		14
1213/* Stop Transfer Ring Command */
1214#define TRB_STOP_RING		15
1215/* Set Transfer Ring Dequeue Pointer Command */
1216#define TRB_SET_DEQ		16
1217/* Reset Device Command */
1218#define TRB_RESET_DEV		17
1219/* Force Event Command (opt) */
1220#define TRB_FORCE_EVENT		18
1221/* Negotiate Bandwidth Command (opt) */
1222#define TRB_NEG_BANDWIDTH	19
1223/* Set Latency Tolerance Value Command (opt) */
1224#define TRB_SET_LT		20
1225/* Get port bandwidth Command */
1226#define TRB_GET_BW		21
1227/* Force Header Command - generate a transaction or link management packet */
1228#define TRB_FORCE_HEADER	22
1229/* No-op Command - not for transfer rings */
1230#define TRB_CMD_NOOP		23
1231/* TRB IDs 24-31 reserved */
1232/* Event TRBS */
1233/* Transfer Event */
1234#define TRB_TRANSFER		32
1235/* Command Completion Event */
1236#define TRB_COMPLETION		33
1237/* Port Status Change Event */
1238#define TRB_PORT_STATUS		34
1239/* Bandwidth Request Event (opt) */
1240#define TRB_BANDWIDTH_EVENT	35
1241/* Doorbell Event (opt) */
1242#define TRB_DOORBELL		36
1243/* Host Controller Event */
1244#define TRB_HC_EVENT		37
1245/* Device Notification Event - device sent function wake notification */
1246#define TRB_DEV_NOTE		38
1247/* MFINDEX Wrap Event - microframe counter wrapped */
1248#define TRB_MFINDEX_WRAP	39
1249/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
1250
1251/* Nec vendor-specific command completion event. */
1252#define	TRB_NEC_CMD_COMP	48
1253/* Get NEC firmware revision. */
1254#define	TRB_NEC_GET_FW		49
1255
1256#define TRB_TYPE_LINK(x)	(((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
1257/* Above, but for __le32 types -- can avoid work by swapping constants: */
1258#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
1259				 cpu_to_le32(TRB_TYPE(TRB_LINK)))
1260#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
1261				 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
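
/*
 * Illustrative sketch (not part of the original header): building the control
 * word of a normal transfer TRB from the field macros above.  "cycle" is the
 * ring's producer cycle state; the flag choice is only an example.
 */
static inline u32 xhci_example_normal_trb_control(u32 cycle)
{
	return TRB_TYPE(TRB_NORMAL) | TRB_IOC | TRB_ISP | (cycle ? TRB_CYCLE : 0);
}
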
1262
1263#define NEC_FW_MINOR(p)		(((p) >> 0) & 0xff)
1264#define NEC_FW_MAJOR(p)		(((p) >> 8) & 0xff)
1265
1266/*
1267 * TRBS_PER_SEGMENT must be a multiple of 4,
1268 * since the command ring is 64-byte aligned.
1269 * It must also be greater than 16.
1270 */
1271#define TRBS_PER_SEGMENT	64
1272/* Allow two commands + a link TRB, along with any reserved command TRBs */
1273#define MAX_RSVD_CMD_TRBS	(TRBS_PER_SEGMENT - 3)
1274#define TRB_SEGMENT_SIZE	(TRBS_PER_SEGMENT*16)
1275#define TRB_SEGMENT_SHIFT	(ilog2(TRB_SEGMENT_SIZE))
1276/* TRB buffer pointers can't cross 64KB boundaries */
1277#define TRB_MAX_BUFF_SHIFT		16
1278#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
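
/*
 * Illustrative sketch (not part of the original header): because a TRB's data
 * buffer may not cross a 64KB boundary, a contiguous transfer is split into
 * one TRB per 64KB-bounded chunk.  This helper is an example, not a driver
 * function.
 */
static inline unsigned int xhci_example_count_trbs(u64 addr, u64 len)
{
	unsigned int num_trbs = 0;
	u64 offset = addr & (TRB_MAX_BUFF_SIZE - 1);

	if (len == 0)
		return 1;	/* a zero-length TRB still occupies one slot */

	while (offset + len > TRB_MAX_BUFF_SIZE) {
		len -= TRB_MAX_BUFF_SIZE - offset;
		offset = 0;
		num_trbs++;
	}
	return num_trbs + 1;
}
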
1279
1280struct xhci_segment {
1281	union xhci_trb		*trbs;
1282	/* private to HCD */
1283	struct xhci_segment	*next;
1284	dma_addr_t		dma;
1285};
1286
1287struct xhci_td {
1288	struct list_head	td_list;
1289	struct list_head	cancelled_td_list;
1290	struct urb		*urb;
1291	struct xhci_segment	*start_seg;
1292	union xhci_trb		*first_trb;
1293	union xhci_trb		*last_trb;
1294};
1295
1296/* xHCI command default timeout value */
1297#define XHCI_CMD_DEFAULT_TIMEOUT	(5 * HZ)
1298
1299/* command descriptor */
1300struct xhci_cd {
1301	struct list_head	cancel_cmd_list;
1302	struct xhci_command	*command;
1303	union xhci_trb		*cmd_trb;
1304};
1305
1306struct xhci_dequeue_state {
1307	struct xhci_segment *new_deq_seg;
1308	union xhci_trb *new_deq_ptr;
1309	int new_cycle_state;
1310};
1311
1312enum xhci_ring_type {
1313	TYPE_CTRL = 0,
1314	TYPE_ISOC,
1315	TYPE_BULK,
1316	TYPE_INTR,
1317	TYPE_STREAM,
1318	TYPE_COMMAND,
1319	TYPE_EVENT,
1320};
1321
1322struct xhci_ring {
1323	struct xhci_segment	*first_seg;
1324	struct xhci_segment	*last_seg;
1325	union  xhci_trb		*enqueue;
1326	struct xhci_segment	*enq_seg;
1327	unsigned int		enq_updates;
1328	union  xhci_trb		*dequeue;
1329	struct xhci_segment	*deq_seg;
1330	unsigned int		deq_updates;
1331	struct list_head	td_list;
1332	/*
1333	 * Write the cycle state into the TRB cycle field to give ownership of
1334	 * the TRB to the host controller (if we are the producer), or to check
1335	 * if we own the TRB (if we are the consumer).  See section 4.9.1.
1336	 */
1337	u32			cycle_state;
1338	unsigned int		stream_id;
1339	unsigned int		num_segs;
1340	unsigned int		num_trbs_free;
1341	unsigned int		num_trbs_free_temp;
1342	enum xhci_ring_type	type;
1343	bool			last_td_was_short;
1344	struct radix_tree_root	*trb_address_map;
1345};
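
/*
 * Illustrative sketch (not part of the original header): handing a queued TRB
 * to the hardware.  The other fields are written first and the control word
 * carrying the cycle bit is written last, behind a write barrier, so the
 * controller never sees a partially written TRB.  The field values are
 * placeholders.
 */
static inline void xhci_example_giveback_trb(struct xhci_ring *ring,
					     union xhci_trb *trb,
					     u32 field1, u32 field2,
					     u32 field3, u32 field4)
{
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	wmb();	/* TRB contents must be visible before the cycle bit flips */
	trb->generic.field[3] = cpu_to_le32((field4 & ~TRB_CYCLE) | ring->cycle_state);
}
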
1346
1347struct xhci_erst_entry {
1348	/* 64-bit event ring segment address */
1349	__le64	seg_addr;
1350	__le32	seg_size;
1351	/* Set to zero */
1352	__le32	rsvd;
1353};
1354
1355struct xhci_erst {
1356	struct xhci_erst_entry	*entries;
1357	unsigned int		num_entries;
1358	/* xhci->event_ring keeps track of segment dma addresses */
1359	dma_addr_t		erst_dma_addr;
1360	/* Num entries the ERST can contain */
1361	unsigned int		erst_size;
1362};
1363
1364struct xhci_scratchpad {
1365	u64 *sp_array;
1366	dma_addr_t sp_dma;
1367	void **sp_buffers;
1368	dma_addr_t *sp_dma_buffers;
1369};
1370
1371struct urb_priv {
1372	int	length;
1373	int	td_cnt;
1374	struct	xhci_td	*td[0];
1375};
1376
1377/*
1378 * Each segment table entry is 4*32bits long.  1K seems like an ok size:
 1379 * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table,
1380 * meaning 64 ring segments.
1381 * Initial allocated size of the ERST, in number of entries */
1382#define	ERST_NUM_SEGS	1
1383/* Initial allocated size of the ERST, in number of entries */
1384#define	ERST_SIZE	64
1385/* Initial number of event segment rings allocated */
1386#define	ERST_ENTRIES	1
1387/* Poll every 60 seconds */
1388#define	POLL_TIMEOUT	60
1389/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
1390#define XHCI_STOP_EP_CMD_TIMEOUT	5
1391/* XXX: Make these module parameters */
1392
1393struct s3_save {
1394	u32	command;
1395	u32	dev_nt;
1396	u64	dcbaa_ptr;
1397	u32	config_reg;
1398	u32	irq_pending;
1399	u32	irq_control;
1400	u32	erst_size;
1401	u64	erst_base;
1402	u64	erst_dequeue;
1403};
1404
1405/* Use for lpm */
1406struct dev_info {
1407	u32			dev_id;
1408	struct	list_head	list;
1409};
1410
1411struct xhci_bus_state {
1412	unsigned long		bus_suspended;
1413	unsigned long		next_statechange;
1414
1415	/* Port suspend arrays are indexed by the portnum of the fake roothub */
1416	/* ports suspend status arrays - max 31 ports for USB2, 15 for USB3 */
1417	u32			port_c_suspend;
1418	u32			suspended_ports;
1419	u32			port_remote_wakeup;
1420	unsigned long		resume_done[USB_MAXCHILDREN];
1421	/* which ports have started to resume */
1422	unsigned long		resuming_ports;
1423	/* Which ports are waiting on RExit to U0 transition. */
1424	unsigned long		rexit_ports;
1425	struct completion	rexit_done[USB_MAXCHILDREN];
1426};
1427
1428
1429/*
1430 * It can take up to 20 ms to transition from RExit to U0 on the
1431 * Intel Lynx Point LP xHCI host.
1432 */
1433#define	XHCI_MAX_REXIT_TIMEOUT	(20 * 1000)
1434
1435static inline unsigned int hcd_index(struct usb_hcd *hcd)
1436{
1437	if (hcd->speed == HCD_USB3)
1438		return 0;
1439	else
1440		return 1;
1441}
1442
1443/* There is one xhci_hcd structure per controller */
1444struct xhci_hcd {
1445	struct usb_hcd *main_hcd;
1446	struct usb_hcd *shared_hcd;
1447	/* glue to PCI and HCD framework */
1448	struct xhci_cap_regs __iomem *cap_regs;
1449	struct xhci_op_regs __iomem *op_regs;
1450	struct xhci_run_regs __iomem *run_regs;
1451	struct xhci_doorbell_array __iomem *dba;
1452	/* Our HCD's current interrupter register set */
1453	struct	xhci_intr_reg __iomem *ir_set;
1454
1455	/* Cached register copies of read-only HC data */
1456	__u32		hcs_params1;
1457	__u32		hcs_params2;
1458	__u32		hcs_params3;
1459	__u32		hcc_params;
1460
1461	spinlock_t	lock;
1462
1463	/* packed release number */
1464	u8		sbrn;
1465	u16		hci_version;
1466	u8		max_slots;
1467	u8		max_interrupters;
1468	u8		max_ports;
1469	u8		isoc_threshold;
1470	int		event_ring_max;
1471	int		addr_64;
1472	/* 4KB min, 128MB max */
1473	int		page_size;
1474	/* Valid values are 12 to 20, inclusive */
1475	int		page_shift;
1476	/* msi-x vectors */
1477	int		msix_count;
1478	struct msix_entry	*msix_entries;
1479	/* data structures */
1480	struct xhci_device_context_array *dcbaa;
1481	struct xhci_ring	*cmd_ring;
1482	unsigned int            cmd_ring_state;
1483#define CMD_RING_STATE_RUNNING         (1 << 0)
1484#define CMD_RING_STATE_ABORTED         (1 << 1)
1485#define CMD_RING_STATE_STOPPED         (1 << 2)
1486	struct list_head        cancel_cmd_list;
1487	unsigned int		cmd_ring_reserved_trbs;
1488	struct xhci_ring	*event_ring;
1489	struct xhci_erst	erst;
1490	/* Scratchpad */
1491	struct xhci_scratchpad  *scratchpad;
1492	/* Store LPM test failed devices' information */
1493	struct list_head	lpm_failed_devs;
1494
1495	/* slot enabling and address device helpers */
1496	struct completion	addr_dev;
1497	int slot_id;
1498	/* For USB 3.0 LPM enable/disable. */
1499	struct xhci_command		*lpm_command;
1500	/* Internal mirror of the HW's dcbaa */
1501	struct xhci_virt_device	*devs[MAX_HC_SLOTS];
1502	/* For keeping track of bandwidth domains per roothub. */
1503	struct xhci_root_port_bw_info	*rh_bw;
1504
1505	/* DMA pools */
1506	struct dma_pool	*device_pool;
1507	struct dma_pool	*segment_pool;
1508	struct dma_pool	*small_streams_pool;
1509	struct dma_pool	*medium_streams_pool;
1510
1511	/* Host controller watchdog timer structures */
1512	unsigned int		xhc_state;
1513
1514	u32			command;
1515	struct s3_save		s3;
1516/* Host controller is dying - not responding to commands. "I'm not dead yet!"
1517 *
 1518 * xHC interrupts have been disabled, and a watchdog timer will halt (or has
 1519 * already halted) the xHCI host and complete all URBs with an -ESHUTDOWN code.  Any code
1520 * that sees this status (other than the timer that set it) should stop touching
1521 * hardware immediately.  Interrupt handlers should return immediately when
1522 * they see this status (any time they drop and re-acquire xhci->lock).
1523 * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without
1524 * putting the TD on the canceled list, etc.
1525 *
1526 * There are no reports of xHCI host controllers that display this issue.
1527 */
1528#define XHCI_STATE_DYING	(1 << 0)
1529#define XHCI_STATE_HALTED	(1 << 1)
1530	/* Statistics */
1531	int			error_bitmask;
1532	unsigned int		quirks;
1533#define	XHCI_LINK_TRB_QUIRK	(1 << 0)
1534#define XHCI_RESET_EP_QUIRK	(1 << 1)
1535#define XHCI_NEC_HOST		(1 << 2)
1536#define XHCI_AMD_PLL_FIX	(1 << 3)
1537#define XHCI_SPURIOUS_SUCCESS	(1 << 4)
1538/*
1539 * Certain Intel host controllers have a limit to the number of endpoint
1540 * contexts they can handle.  Ideally, they would signal that they can't handle
1541 * any more endpoint contexts by returning a Resource Error for the Configure
1542 * Endpoint command, but they don't.  Instead they expect software to keep track
1543 * of the number of active endpoints for them, across configure endpoint
1544 * commands, reset device commands, disable slot commands, and address device
1545 * commands.
1546 */
1547#define XHCI_EP_LIMIT_QUIRK	(1 << 5)
1548#define XHCI_BROKEN_MSI		(1 << 6)
1549#define XHCI_RESET_ON_RESUME	(1 << 7)
1550#define	XHCI_SW_BW_CHECKING	(1 << 8)
1551#define XHCI_AMD_0x96_HOST	(1 << 9)
1552#define XHCI_TRUST_TX_LENGTH	(1 << 10)
1553#define XHCI_LPM_SUPPORT	(1 << 11)
1554#define XHCI_INTEL_HOST		(1 << 12)
1555#define XHCI_SPURIOUS_REBOOT	(1 << 13)
1556#define XHCI_COMP_MODE_QUIRK	(1 << 14)
1557#define XHCI_AVOID_BEI		(1 << 15)
1558#define XHCI_PLAT		(1 << 16)
1559#define XHCI_SLOW_SUSPEND	(1 << 17)
1560#define XHCI_SPURIOUS_WAKEUP	(1 << 18)
1561	unsigned int		num_active_eps;
1562	unsigned int		limit_active_eps;
1563	/* There are two roothubs to keep track of bus suspend info for */
1564	struct xhci_bus_state   bus_state[2];
1565	/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
1566	u8			*port_array;
1567	/* Array of pointers to USB 3.0 PORTSC registers */
1568	__le32 __iomem		**usb3_ports;
1569	unsigned int		num_usb3_ports;
1570	/* Array of pointers to USB 2.0 PORTSC registers */
1571	__le32 __iomem		**usb2_ports;
1572	unsigned int		num_usb2_ports;
1573	/* support xHCI 0.96 spec USB2 software LPM */
1574	unsigned		sw_lpm_support:1;
1575	/* support xHCI 1.0 spec USB2 hardware LPM */
1576	unsigned		hw_lpm_support:1;
1577	/* cached usb2 extended protocol capabilities */
1578	u32                     *ext_caps;
1579	unsigned int            num_ext_caps;
1580	/* Compliance Mode Recovery Data */
1581	struct timer_list	comp_mode_recovery_timer;
1582	u32			port_status_u0;
 
1583/* Compliance Mode Timer Triggered every 2 seconds */
1584#define COMP_MODE_RCVRY_MSECS 2000
1585};
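/*
 * Editorial illustration (not part of xhci.h): the bookkeeping that the
 * XHCI_EP_LIMIT_QUIRK comment above describes.  A hypothetical helper like
 * this would be called with xhci->lock held before queueing a Configure
 * Endpoint command; "added_eps" is an assumed parameter naming how many
 * endpoint contexts the command would activate.
 */
static inline int example_reserve_host_eps(struct xhci_hcd *xhci,
					   unsigned int added_eps)
{
	if (!(xhci->quirks & XHCI_EP_LIMIT_QUIRK))
		return 0;
	/* Mirror the Resource Error the hardware should have returned */
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps)
		return -ENOMEM;
	xhci->num_active_eps += added_eps;
	return 0;
}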
1586
 
 
1587/* convert between an HCD pointer and the corresponding xhci_hcd */
1588static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
1589{
1590	return *((struct xhci_hcd **) (hcd->hcd_priv));
1591}
1592
1593static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
1594{
1595	return xhci->main_hcd;
1596}
1597
1598#define xhci_dbg(xhci, fmt, args...) \
1599	dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1600#define xhci_err(xhci, fmt, args...) \
1601	dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1602#define xhci_warn(xhci, fmt, args...) \
1603	dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1604#define xhci_warn_ratelimited(xhci, fmt, args...) \
1605	dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
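/*
 * Editorial usage note (not part of xhci.h): these wrappers take the
 * struct xhci_hcd and forward to the corresponding dev_*() helpers on the
 * controller's struct device, e.g.:
 *
 *	xhci_warn(xhci, "Slot %u not enabled\n", slot_id);
 */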
1606
1607/*
1608 * Registers should always be accessed with double word or quad word accesses.
1609 *
1610 * Some xHCI implementations may support 64-bit address pointers.  Registers
1611 * with 64-bit address pointers should be written to with dword accesses by
1612 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
1613 * xHCI implementations that do not support 64-bit address pointers will ignore
1614 * the high dword, and write order is irrelevant.
1615 */
1616static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
1617		__le64 __iomem *regs)
1618{
1619	__u32 __iomem *ptr = (__u32 __iomem *) regs;
1620	u64 val_lo = readl(ptr);
1621	u64 val_hi = readl(ptr + 1);
1622	return val_lo + (val_hi << 32);
1623}
1624static inline void xhci_write_64(struct xhci_hcd *xhci,
1625				 const u64 val, __le64 __iomem *regs)
1626{
1627	__u32 __iomem *ptr = (__u32 __iomem *) regs;
1628	u32 val_lo = lower_32_bits(val);
1629	u32 val_hi = upper_32_bits(val);
1630
1631	writel(val_lo, ptr);
1632	writel(val_hi, ptr + 1);
1633}
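/*
 * Editorial illustration (not part of xhci.h): programming a 64-bit
 * register, such as the DCBAA pointer in the operational registers, with
 * the accessor above.  "dma" is assumed to be the DMA address of the
 * device context base address array.
 */
static inline void example_set_dcbaa_ptr(struct xhci_hcd *xhci, dma_addr_t dma)
{
	/* xhci_write_64() writes the low dword first, then the high dword */
	xhci_write_64(xhci, (u64)dma, &xhci->op_regs->dcbaa_ptr);
}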
1634
1635static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
1636{
1637	return xhci->quirks & XHCI_LINK_TRB_QUIRK;
1638}
1639
1640/* xHCI debugging */
1641void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
1642void xhci_print_registers(struct xhci_hcd *xhci);
1643void xhci_dbg_regs(struct xhci_hcd *xhci);
1644void xhci_print_run_regs(struct xhci_hcd *xhci);
1645void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
1646void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
1647void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
1648void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
1649void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
1650void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
1651void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
1652void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
1653char *xhci_get_slot_state(struct xhci_hcd *xhci,
1654		struct xhci_container_ctx *ctx);
1655void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
1656		unsigned int slot_id, unsigned int ep_index,
1657		struct xhci_virt_ep *ep);
1658void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
1659			const char *fmt, ...);
1660
1661/* xHCI memory management */
1662void xhci_mem_cleanup(struct xhci_hcd *xhci);
1663int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
1664void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
1665int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
1666int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
1667void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1668		struct usb_device *udev);
1669unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
1670unsigned int xhci_get_endpoint_address(unsigned int ep_index);
1671unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
1672unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
1673unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
1674void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
1675void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
1676		struct xhci_bw_info *ep_bw,
1677		struct xhci_interval_bw_table *bw_table,
1678		struct usb_device *udev,
1679		struct xhci_virt_ep *virt_ep,
1680		struct xhci_tt_bw_info *tt_info);
1681void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
1682		struct xhci_virt_device *virt_dev,
1683		int old_active_eps);
1684void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info);
1685void xhci_update_bw_info(struct xhci_hcd *xhci,
1686		struct xhci_container_ctx *in_ctx,
1687		struct xhci_input_control_ctx *ctrl_ctx,
1688		struct xhci_virt_device *virt_dev);
1689void xhci_endpoint_copy(struct xhci_hcd *xhci,
1690		struct xhci_container_ctx *in_ctx,
1691		struct xhci_container_ctx *out_ctx,
1692		unsigned int ep_index);
1693void xhci_slot_copy(struct xhci_hcd *xhci,
1694		struct xhci_container_ctx *in_ctx,
1695		struct xhci_container_ctx *out_ctx);
1696int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
1697		struct usb_device *udev, struct usb_host_endpoint *ep,
1698		gfp_t mem_flags);
 
 
1699void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
1700int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
1701				unsigned int num_trbs, gfp_t flags);
1702void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
1703		struct xhci_virt_device *virt_dev,
1704		unsigned int ep_index);
1705struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
1706		unsigned int num_stream_ctxs,
1707		unsigned int num_streams, gfp_t flags);
 
1708void xhci_free_stream_info(struct xhci_hcd *xhci,
1709		struct xhci_stream_info *stream_info);
1710void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
1711		struct xhci_ep_ctx *ep_ctx,
1712		struct xhci_stream_info *stream_info);
1713void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
1714		struct xhci_ep_ctx *ep_ctx,
1715		struct xhci_virt_ep *ep);
1716void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
1717	struct xhci_virt_device *virt_dev, bool drop_control_ep);
1718struct xhci_ring *xhci_dma_to_transfer_ring(
1719		struct xhci_virt_ep *ep,
1720		u64 address);
1721struct xhci_ring *xhci_stream_id_to_ring(
1722		struct xhci_virt_device *dev,
1723		unsigned int ep_index,
1724		unsigned int stream_id);
1725struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1726		bool allocate_in_ctx, bool allocate_completion,
1727		gfp_t mem_flags);
1728void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv);
 
1729void xhci_free_command(struct xhci_hcd *xhci,
1730		struct xhci_command *command);
1731
1732#ifdef CONFIG_PCI
1733/* xHCI PCI glue */
1734int xhci_register_pci(void);
1735void xhci_unregister_pci(void);
1736#else
1737static inline int xhci_register_pci(void) { return 0; }
1738static inline void xhci_unregister_pci(void) {}
1739#endif
1740
1741#if defined(CONFIG_USB_XHCI_PLATFORM) \
1742	|| defined(CONFIG_USB_XHCI_PLATFORM_MODULE)
1743int xhci_register_plat(void);
1744void xhci_unregister_plat(void);
1745#else
1746static inline int xhci_register_plat(void)
1747{ return 0; }
1748static inline void xhci_unregister_plat(void)
1749{  }
1750#endif
1751
1752/* xHCI host controller glue */
1753typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
1754int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
1755		u32 mask, u32 done, int usec);
 
1756void xhci_quiesce(struct xhci_hcd *xhci);
1757int xhci_halt(struct xhci_hcd *xhci);
1758int xhci_reset(struct xhci_hcd *xhci);
1759int xhci_init(struct usb_hcd *hcd);
1760int xhci_run(struct usb_hcd *hcd);
1761void xhci_stop(struct usb_hcd *hcd);
1762void xhci_shutdown(struct usb_hcd *hcd);
1763int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
1764
1765#ifdef	CONFIG_PM
1766int xhci_suspend(struct xhci_hcd *xhci);
1767int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
1768#else
1769#define	xhci_suspend	NULL
1770#define	xhci_resume	NULL
1771#endif
1772
1773int xhci_get_frame(struct usb_hcd *hcd);
1774irqreturn_t xhci_irq(struct usb_hcd *hcd);
1775irqreturn_t xhci_msi_irq(int irq, void *hcd);
1776int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
1777void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
1778int xhci_alloc_tt_info(struct xhci_hcd *xhci,
1779		struct xhci_virt_device *virt_dev,
1780		struct usb_device *hdev,
1781		struct usb_tt *tt, gfp_t mem_flags);
1782int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
1783		struct usb_host_endpoint **eps, unsigned int num_eps,
1784		unsigned int num_streams, gfp_t mem_flags);
1785int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
1786		struct usb_host_endpoint **eps, unsigned int num_eps,
1787		gfp_t mem_flags);
1788int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
1789int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev);
1790int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev);
1791int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
1792				struct usb_device *udev, int enable);
1793int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
1794			struct usb_tt *tt, gfp_t mem_flags);
1795int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
1796int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
1797int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1798int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1799void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
1800int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
1801int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1802void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1803
1804/* xHCI ring, segment, TRB, and TD functions */
1805dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
1806struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1807		union xhci_trb *start_trb, union xhci_trb *end_trb,
1808		dma_addr_t suspect_dma);
1809int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
1810void xhci_ring_cmd_db(struct xhci_hcd *xhci);
1811int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
1812int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1813		u32 slot_id, enum xhci_setup_dev);
1814int xhci_queue_vendor_command(struct xhci_hcd *xhci,
1815		u32 field1, u32 field2, u32 field3, u32 field4);
1816int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
1817		unsigned int ep_index, int suspend);
1818int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1819		int slot_id, unsigned int ep_index);
1820int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1821		int slot_id, unsigned int ep_index);
1822int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1823		int slot_id, unsigned int ep_index);
1824int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
1825		struct urb *urb, int slot_id, unsigned int ep_index);
1826int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1827		u32 slot_id, bool command_must_succeed);
1828int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1829		u32 slot_id, bool command_must_succeed);
1830int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
1831		unsigned int ep_index);
1832int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
1833void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
1834		unsigned int slot_id, unsigned int ep_index,
1835		unsigned int stream_id, struct xhci_td *cur_td,
1836		struct xhci_dequeue_state *state);
1837void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
1838		unsigned int slot_id, unsigned int ep_index,
1839		unsigned int stream_id,
1840		struct xhci_dequeue_state *deq_state);
1841void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1842		struct usb_device *udev, unsigned int ep_index);
1843void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
1844		unsigned int slot_id, unsigned int ep_index,
1845		struct xhci_dequeue_state *deq_state);
1846void xhci_stop_endpoint_command_watchdog(unsigned long arg);
1847int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
1848		union xhci_trb *cmd_trb);
1849void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
1850		unsigned int ep_index, unsigned int stream_id);
1851union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
1852
1853/* xHCI roothub code */
1854void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
1855				int port_id, u32 link_state);
1856int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
1857			struct usb_device *udev, enum usb3_link_state state);
1858int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
1859			struct usb_device *udev, enum usb3_link_state state);
1860void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
1861				int port_id, u32 port_bit);
1862int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
1863		char *buf, u16 wLength);
1864int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
1865int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
1866
1867#ifdef CONFIG_PM
1868int xhci_bus_suspend(struct usb_hcd *hcd);
1869int xhci_bus_resume(struct usb_hcd *hcd);
 
1870#else
1871#define	xhci_bus_suspend	NULL
1872#define	xhci_bus_resume		NULL
 
1873#endif	/* CONFIG_PM */
1874
1875u32 xhci_port_state_to_neutral(u32 state);
1876int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
1877		u16 port);
1878void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
1879
1880/* xHCI contexts */
1881struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
1882struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
1883struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
1884
1885/* xHCI quirks */
1886bool xhci_compliance_mode_recovery_timer_quirk_check(void);
1887
1888#endif /* __LINUX_XHCI_HCD_H */
v6.13.7
   1/* SPDX-License-Identifier: GPL-2.0 */
   2
   3/*
   4 * xHCI host controller driver
   5 *
   6 * Copyright (C) 2008 Intel Corp.
   7 *
   8 * Author: Sarah Sharp
   9 * Some code borrowed from the Linux EHCI driver.
  10 */
  11
  12#ifndef __LINUX_XHCI_HCD_H
  13#define __LINUX_XHCI_HCD_H
  14
  15#include <linux/usb.h>
  16#include <linux/timer.h>
  17#include <linux/kernel.h>
  18#include <linux/usb/hcd.h>
  19#include <linux/io-64-nonatomic-lo-hi.h>
  20#include <linux/io-64-nonatomic-hi-lo.h>
  21
  22/* Code sharing between pci-quirks and xhci hcd */
  23#include	"xhci-ext-caps.h"
  24#include "pci-quirks.h"
  25
  26#include "xhci-port.h"
  27#include "xhci-caps.h"
  28
  29/* max buffer size for trace and debug messages */
  30#define XHCI_MSG_MAX		500
  31
  32/* xHCI PCI Configuration Registers */
  33#define XHCI_SBRN_OFFSET	(0x60)
  34
  35/* Max number of USB devices for any host controller - limit in section 6.1 */
  36#define MAX_HC_SLOTS		256
  37/* Section 5.3.3 - MaxPorts */
  38#define MAX_HC_PORTS		127
  39
  40/*
  41 * xHCI register interface.
  42 * This corresponds to the eXtensible Host Controller Interface (xHCI)
  43 * Revision 0.95 specification
  44 */
  45
  46/**
  47 * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
  48 * @hc_capbase:		length of the capabilities register and HC version number
  49 * @hcs_params1:	HCSPARAMS1 - Structural Parameters 1
  50 * @hcs_params2:	HCSPARAMS2 - Structural Parameters 2
  51 * @hcs_params3:	HCSPARAMS3 - Structural Parameters 3
  52 * @hcc_params:		HCCPARAMS - Capability Parameters
  53 * @db_off:		DBOFF - Doorbell array offset
  54 * @run_regs_off:	RTSOFF - Runtime register space offset
  55 * @hcc_params2:	HCCPARAMS2 Capability Parameters 2, xhci 1.1 only
  56 */
  57struct xhci_cap_regs {
  58	__le32	hc_capbase;
  59	__le32	hcs_params1;
  60	__le32	hcs_params2;
  61	__le32	hcs_params3;
  62	__le32	hcc_params;
  63	__le32	db_off;
  64	__le32	run_regs_off;
  65	__le32	hcc_params2; /* xhci 1.1 */
  66	/* Reserved up to (CAPLENGTH - 0x1C) */
  67};
  68
  69/* Number of registers per port */
  70#define	NUM_PORT_REGS	4
  71
  72#define PORTSC		0
  73#define PORTPMSC	1
  74#define PORTLI		2
  75#define PORTHLPMC	3
  76
  77/**
  78 * struct xhci_op_regs - xHCI Host Controller Operational Registers.
  79 * @command:		USBCMD - xHC command register
  80 * @status:		USBSTS - xHC status register
  81 * @page_size:		This indicates the page size that the host controller
  82 * 			supports.  If bit n is set, the HC supports a page size
  83 * 			of 2^(n+12), up to a 128MB page size.
  84 * 			4K is the minimum page size.
  85 * @cmd_ring:		CRP - 64-bit Command Ring Pointer
  86 * @dcbaa_ptr:		DCBAAP - 64-bit Device Context Base Address Array Pointer
  87 * @config_reg:		CONFIG - Configure Register
  88 * @port_status_base:	PORTSCn - base address for Port Status and Control
  89 * 			Each port has a Port Status and Control register,
  90 * 			followed by a Port Power Management Status and Control
  91 * 			register, a Port Link Info register, and a reserved
  92 * 			register.
  93 * @port_power_base:	PORTPMSCn - base address for
  94 * 			Port Power Management Status and Control
  95 * @port_link_base:	PORTLIn - base address for Port Link Info (current
  96 * 			Link PM state and control) for USB 2.1 and USB 3.0
  97 * 			devices.
  98 */
  99struct xhci_op_regs {
 100	__le32	command;
 101	__le32	status;
 102	__le32	page_size;
 103	__le32	reserved1;
 104	__le32	reserved2;
 105	__le32	dev_notification;
 106	__le64	cmd_ring;
 107	/* rsvd: offset 0x20-2F */
 108	__le32	reserved3[4];
 109	__le64	dcbaa_ptr;
 110	__le32	config_reg;
 111	/* rsvd: offset 0x3C-3FF */
 112	__le32	reserved4[241];
 113	/* port 1 registers, which serve as a base address for other ports */
 114	__le32	port_status_base;
 115	__le32	port_power_base;
 116	__le32	port_link_base;
 117	__le32	reserved5;
 118	/* registers for ports 2-255 */
 119	__le32	reserved6[NUM_PORT_REGS*254];
 120};
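/*
 * Editorial illustration (not part of xhci.h): locating the PORTSC
 * register of a root hub port from the layout above.  Each port owns
 * NUM_PORT_REGS consecutive 32-bit registers starting at
 * port_status_base; "port_index" is an assumed zero-based index.
 */
static inline __le32 __iomem *example_portsc_addr(struct xhci_op_regs __iomem *op_regs,
						  unsigned int port_index)
{
	return &op_regs->port_status_base + NUM_PORT_REGS * port_index;
}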
 121
 122/* USBCMD - USB command - command bitmasks */
 123/* start/stop HC execution - do not write unless HC is halted*/
 124#define CMD_RUN		XHCI_CMD_RUN
 125/* Reset HC - resets internal HC state machine and all registers (except
 126 * PCI config regs).  HC does NOT drive a USB reset on the downstream ports.
 127 * The xHCI driver must reinitialize the xHC after setting this bit.
 128 */
 129#define CMD_RESET	(1 << 1)
 130/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
 131#define CMD_EIE		XHCI_CMD_EIE
 132/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
 133#define CMD_HSEIE	XHCI_CMD_HSEIE
 134/* bits 4:6 are reserved (and should be preserved on writes). */
 135/* light reset (port status stays unchanged) - reset completed when this is 0 */
 136#define CMD_LRESET	(1 << 7)
 137/* host controller save/restore state. */
 138#define CMD_CSS		(1 << 8)
 139#define CMD_CRS		(1 << 9)
 140/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
 141#define CMD_EWE		XHCI_CMD_EWE
 142/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
 143 * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
 144 * '0' means the xHC can power it off if all ports are in the disconnect,
 145 * disabled, or powered-off state.
 146 */
 147#define CMD_PM_INDEX	(1 << 11)
 148/* bit 14 Extended TBC Enable, changes Isoc TRB fields to support larger TBC */
 149#define CMD_ETE		(1 << 14)
 150/* bits 15:31 are reserved (and should be preserved on writes). */
 151
 152#define XHCI_RESET_LONG_USEC		(10 * 1000 * 1000)
 153#define XHCI_RESET_SHORT_USEC		(250 * 1000)
 154
 155/* IMAN - Interrupt Management Register */
 156#define IMAN_IE		(1 << 1)
 157#define IMAN_IP		(1 << 0)
 158
 159/* USBSTS - USB status - status bitmasks */
 160/* HC not running - set to 1 when run/stop bit is cleared. */
 161#define STS_HALT	XHCI_STS_HALT
 162/* serious error, e.g. PCI parity error.  The HC will clear the run/stop bit. */
 163#define STS_FATAL	(1 << 2)
 164/* event interrupt - clear this prior to clearing any IP flags in IR set*/
 165#define STS_EINT	(1 << 3)
 166/* port change detect */
 167#define STS_PORT	(1 << 4)
 168/* bits 5:7 reserved and zeroed */
 169/* save state status - '1' means xHC is saving state */
 170#define STS_SAVE	(1 << 8)
 171/* restore state status - '1' means xHC is restoring state */
 172#define STS_RESTORE	(1 << 9)
 173/* true: save or restore error */
 174#define STS_SRE		(1 << 10)
 175/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
 176#define STS_CNR		XHCI_STS_CNR
 177/* true: internal Host Controller Error - SW needs to reset and reinitialize */
 178#define STS_HCE		(1 << 12)
 179/* bits 13:31 reserved and should be preserved */
 180
 181/*
 182 * DNCTRL - Device Notification Control Register - dev_notification bitmasks
 183 * Generate a device notification event when the HC sees a transaction with a
 184 * notification type that matches a bit set in this bit field.
 185 */
 186#define	DEV_NOTE_MASK		(0xffff)
 187#define ENABLE_DEV_NOTE(x)	(1 << (x))
 188/* Most of the device notification types should only be used for debug.
 189 * SW does need to pay attention to function wake notifications.
 190 */
 191#define	DEV_NOTE_FWAKE		ENABLE_DEV_NOTE(1)
 192
 193/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
 194/* bit 0 is the command ring cycle state */
 195/* stop ring operation after completion of the currently executing command */
 196#define CMD_RING_PAUSE		(1 << 1)
 197/* stop ring immediately - abort the currently executing command */
 198#define CMD_RING_ABORT		(1 << 2)
 199/* true: command ring is running */
 200#define CMD_RING_RUNNING	(1 << 3)
 201/* bits 4:5 reserved and should be preserved */
 202/* Command Ring pointer - bit mask for the lower 32 bits. */
 203#define CMD_RING_RSVD_BITS	(0x3f)
 204
 205/* CONFIG - Configure Register - config_reg bitmasks */
 206/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
 207#define MAX_DEVS(p)	((p) & 0xff)
 208/* bit 8: U3 Entry Enabled, assert PLC when root port enters U3, xhci 1.1 */
 209#define CONFIG_U3E		(1 << 8)
 210/* bit 9: Configuration Information Enable, xhci 1.1 */
 211#define CONFIG_CIE		(1 << 9)
 212/* bits 10:31 - reserved and should be preserved */
 213
 214/**
 215 * struct xhci_intr_reg - Interrupt Register Set
 216 * @irq_pending:	IMAN - Interrupt Management Register.  Used to enable
 217 *			interrupts and check for pending interrupts.
 218 * @irq_control:	IMOD - Interrupt Moderation Register.
 219 * 			Used to throttle interrupts.
 220 * @erst_size:		Number of segments in the Event Ring Segment Table (ERST).
 221 * @erst_base:		ERST base address.
 222 * @erst_dequeue:	Event ring dequeue pointer.
 223 *
 224 * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
 225 * Ring Segment Table (ERST) associated with it.  The event ring is comprised of
 226 * multiple segments of the same size.  The HC places events on the ring and
 227 * "updates the Cycle bit in the TRBs to indicate to software the current
 228 * position of the Enqueue Pointer." The HCD (Linux) processes those events and
 229 * updates the dequeue pointer.
 230 */
 231struct xhci_intr_reg {
 232	__le32	irq_pending;
 233	__le32	irq_control;
 234	__le32	erst_size;
 235	__le32	rsvd;
 236	__le64	erst_base;
 237	__le64	erst_dequeue;
 238};
 239
 240/* irq_pending bitmasks */
 241#define	ER_IRQ_PENDING(p)	((p) & 0x1)
 242/* bits 2:31 need to be preserved */
 243/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
 244#define	ER_IRQ_CLEAR(p)		((p) & 0xfffffffe)
 245#define	ER_IRQ_ENABLE(p)	((ER_IRQ_CLEAR(p)) | 0x2)
 246#define	ER_IRQ_DISABLE(p)	((ER_IRQ_CLEAR(p)) & ~(0x2))
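/*
 * Editorial illustration (not part of xhci.h): enabling event interrupts
 * on an interrupter by read-modify-writing IMAN with the helpers above.
 * "ir_set" is assumed to point at the interrupter's register set.
 */
static inline void example_enable_interrupter(struct xhci_intr_reg __iomem *ir_set)
{
	u32 iman = readl(&ir_set->irq_pending);

	writel(ER_IRQ_ENABLE(iman), &ir_set->irq_pending);
}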
 247
 248/* irq_control bitmasks */
 249/* Minimum interval between interrupts (in 250ns intervals).  The interval
 250 * between interrupts will be longer if there are no events on the event ring.
 251 * Default is 4000 (1 ms).
 252 */
 253#define ER_IRQ_INTERVAL_MASK	(0xffff)
 254/* Counter used to count down the time to the next interrupt - HW use only */
 255#define ER_IRQ_COUNTER_MASK	(0xffff << 16)
 256
 257/* erst_size bitmasks */
 258/* Preserve bits 16:31 of erst_size */
 259#define	ERST_SIZE_MASK		(0xffff << 16)
 260
 261/* erst_base bitmasks */
 262#define ERST_BASE_RSVDP		(GENMASK_ULL(5, 0))
 263
 264/* erst_dequeue bitmasks */
 265/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
 266 * where the current dequeue pointer lies.  This is an optional HW hint.
 267 */
 268#define ERST_DESI_MASK		(0x7)
 269/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
 270 * a work queue (or delayed service routine)?
 271 */
 272#define ERST_EHB		(1 << 3)
 273#define ERST_PTR_MASK		(GENMASK_ULL(63, 4))
 274
 275/**
 276 * struct xhci_run_regs
 277 * @microframe_index:
 278 * 		MFINDEX - current microframe number
 279 *
 280 * Section 5.5 Host Controller Runtime Registers:
 281 * "Software should read and write these registers using only Dword (32 bit)
 282 * or larger accesses"
 283 */
 284struct xhci_run_regs {
 285	__le32			microframe_index;
 286	__le32			rsvd[7];
 287	struct xhci_intr_reg	ir_set[128];
 288};
 289
 290/**
 291 * struct doorbell_array
 292 *
 293 * Bits  0 -  7: Endpoint target
 294 * Bits  8 - 15: RsvdZ
 295 * Bits 16 - 31: Stream ID
 296 *
 297 * Section 5.6
 298 */
 299struct xhci_doorbell_array {
 300	__le32	doorbell[256];
 301};
 302
 303#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))
 304#define DB_VALUE_HOST		0x00000000
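/*
 * Editorial illustration (not part of xhci.h): ringing an endpoint
 * doorbell with the encoding above.  The doorbell target is the endpoint
 * ID (ep_index + 1), folded in by DB_VALUE(); the upper half carries the
 * stream ID.  "slot_id", "ep_index" and "stream_id" are assumed inputs.
 */
static inline void example_ring_ep_doorbell(struct xhci_doorbell_array __iomem *dba,
					    unsigned int slot_id,
					    unsigned int ep_index,
					    unsigned int stream_id)
{
	writel(DB_VALUE(ep_index, stream_id), &dba->doorbell[slot_id]);
}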
 305
 306#define PLT_MASK        (0x03 << 6)
 307#define PLT_SYM         (0x00 << 6)
 308#define PLT_ASYM_RX     (0x02 << 6)
 309#define PLT_ASYM_TX     (0x03 << 6)
 310
 311/**
 312 * struct xhci_container_ctx
  313 * @type: Type of context.  Used to calculate offsets to contained contexts.
 314 * @size: Size of the context data
 315 * @bytes: The raw context data given to HW
 316 * @dma: dma address of the bytes
 317 *
 318 * Represents either a Device or Input context.  Holds a pointer to the raw
 319 * memory used for the context (bytes) and dma address of it (dma).
 320 */
 321struct xhci_container_ctx {
 322	unsigned type;
 323#define XHCI_CTX_TYPE_DEVICE  0x1
 324#define XHCI_CTX_TYPE_INPUT   0x2
 325
 326	int size;
 327
 328	u8 *bytes;
 329	dma_addr_t dma;
 330};
 331
 332/**
 333 * struct xhci_slot_ctx
 334 * @dev_info:	Route string, device speed, hub info, and last valid endpoint
 335 * @dev_info2:	Max exit latency for device number, root hub port number
 336 * @tt_info:	tt_info is used to construct split transaction tokens
 337 * @dev_state:	slot state and device address
 338 *
 339 * Slot Context - section 6.2.1.1.  This assumes the HC uses 32-byte context
 340 * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
 341 * reserved at the end of the slot context for HC internal use.
 342 */
 343struct xhci_slot_ctx {
 344	__le32	dev_info;
 345	__le32	dev_info2;
 346	__le32	tt_info;
 347	__le32	dev_state;
 348	/* offset 0x10 to 0x1f reserved for HC internal use */
 349	__le32	reserved[4];
 350};
 351
 352/* dev_info bitmasks */
 353/* Route String - 0:19 */
 354#define ROUTE_STRING_MASK	(0xfffff)
 355/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
 356#define DEV_SPEED	(0xf << 20)
 357#define GET_DEV_SPEED(n) (((n) & DEV_SPEED) >> 20)
 358/* bit 24 reserved */
 359/* Is this LS/FS device connected through a HS hub? - bit 25 */
 360#define DEV_MTT		(0x1 << 25)
 361/* Set if the device is a hub - bit 26 */
 362#define DEV_HUB		(0x1 << 26)
 363/* Index of the last valid endpoint context in this device context - 27:31 */
 364#define LAST_CTX_MASK	(0x1f << 27)
 365#define LAST_CTX(p)	((p) << 27)
 366#define LAST_CTX_TO_EP_NUM(p)	(((p) >> 27) - 1)
 367#define SLOT_FLAG	(1 << 0)
 368#define EP0_FLAG	(1 << 1)
 369
 370/* dev_info2 bitmasks */
 371/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
 372#define MAX_EXIT	(0xffff)
 373/* Root hub port number that is needed to access the USB device */
 374#define ROOT_HUB_PORT(p)	(((p) & 0xff) << 16)
 375#define DEVINFO_TO_ROOT_HUB_PORT(p)	(((p) >> 16) & 0xff)
 376/* Maximum number of ports under a hub device */
 377#define XHCI_MAX_PORTS(p)	(((p) & 0xff) << 24)
 378#define DEVINFO_TO_MAX_PORTS(p)	(((p) & (0xff << 24)) >> 24)
 379
 380/* tt_info bitmasks */
 381/*
 382 * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
 383 * The Slot ID of the hub that isolates the high speed signaling from
 384 * this low or full-speed device.  '0' if attached to root hub port.
 385 */
 386#define TT_SLOT		(0xff)
 387/*
 388 * The number of the downstream facing port of the high-speed hub
 389 * '0' if the device is not low or full speed.
 390 */
 391#define TT_PORT		(0xff << 8)
 392#define TT_THINK_TIME(p)	(((p) & 0x3) << 16)
 393#define GET_TT_THINK_TIME(p)	(((p) & (0x3 << 16)) >> 16)
 394
 395/* dev_state bitmasks */
 396/* USB device address - assigned by the HC */
 397#define DEV_ADDR_MASK	(0xff)
 398/* bits 8:26 reserved */
 399/* Slot state */
 400#define SLOT_STATE	(0x1f << 27)
 401#define GET_SLOT_STATE(p)	(((p) & (0x1f << 27)) >> 27)
 402
 403#define SLOT_STATE_DISABLED	0
 404#define SLOT_STATE_ENABLED	SLOT_STATE_DISABLED
 405#define SLOT_STATE_DEFAULT	1
 406#define SLOT_STATE_ADDRESSED	2
 407#define SLOT_STATE_CONFIGURED	3
 408
 409/**
 410 * struct xhci_ep_ctx
 411 * @ep_info:	endpoint state, streams, mult, and interval information.
 412 * @ep_info2:	information on endpoint type, max packet size, max burst size,
 413 * 		error count, and whether the HC will force an event for all
 414 * 		transactions.
 415 * @deq:	64-bit ring dequeue pointer address.  If the endpoint only
 416 * 		defines one stream, this points to the endpoint transfer ring.
 417 * 		Otherwise, it points to a stream context array, which has a
 418 * 		ring pointer for each flow.
 419 * @tx_info:
 420 * 		Average TRB lengths for the endpoint ring and
 421 * 		max payload within an Endpoint Service Interval Time (ESIT).
 422 *
 423 * Endpoint Context - section 6.2.1.2.  This assumes the HC uses 32-byte context
 424 * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
 425 * reserved at the end of the endpoint context for HC internal use.
 426 */
 427struct xhci_ep_ctx {
 428	__le32	ep_info;
 429	__le32	ep_info2;
 430	__le64	deq;
 431	__le32	tx_info;
 432	/* offset 0x14 - 0x1f reserved for HC internal use */
 433	__le32	reserved[3];
 434};
 435
 436/* ep_info bitmasks */
 437/*
 438 * Endpoint State - bits 0:2
 439 * 0 - disabled
 440 * 1 - running
 441 * 2 - halted due to halt condition - ok to manipulate endpoint ring
 442 * 3 - stopped
 443 * 4 - TRB error
 444 * 5-7 - reserved
 445 */
 446#define EP_STATE_MASK		(0x7)
 447#define EP_STATE_DISABLED	0
 448#define EP_STATE_RUNNING	1
 449#define EP_STATE_HALTED		2
 450#define EP_STATE_STOPPED	3
 451#define EP_STATE_ERROR		4
 452#define GET_EP_CTX_STATE(ctx)	(le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)
 453
  454/* Mult - Max number of bursts within an interval, in EP companion desc. */
 455#define EP_MULT(p)		(((p) & 0x3) << 8)
 456#define CTX_TO_EP_MULT(p)	(((p) >> 8) & 0x3)
 457/* bits 10:14 are Max Primary Streams */
 458/* bit 15 is Linear Stream Array */
 459/* Interval - period between requests to an endpoint - 125u increments. */
 460#define EP_INTERVAL(p)			(((p) & 0xff) << 16)
 461#define EP_INTERVAL_TO_UFRAMES(p)	(1 << (((p) >> 16) & 0xff))
 462#define CTX_TO_EP_INTERVAL(p)		(((p) >> 16) & 0xff)
 463#define EP_MAXPSTREAMS_MASK		(0x1f << 10)
 464#define EP_MAXPSTREAMS(p)		(((p) << 10) & EP_MAXPSTREAMS_MASK)
 465#define CTX_TO_EP_MAXPSTREAMS(p)	(((p) & EP_MAXPSTREAMS_MASK) >> 10)
 466/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
 467#define	EP_HAS_LSA		(1 << 15)
 468/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
 469#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p)	(((p) >> 24) & 0xff)
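/*
 * Editorial worked example (not part of xhci.h): an Interval field of 3 in
 * ep_info decodes via EP_INTERVAL_TO_UFRAMES() to 1 << 3 = 8 microframes,
 * i.e. a service interval of 8 * 125 us = 1 ms.
 */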
 470
 471/* ep_info2 bitmasks */
 472/*
 473 * Force Event - generate transfer events for all TRBs for this endpoint
 474 * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
 475 */
 476#define	FORCE_EVENT	(0x1)
 477#define ERROR_COUNT(p)	(((p) & 0x3) << 1)
 478#define CTX_TO_EP_TYPE(p)	(((p) >> 3) & 0x7)
 479#define EP_TYPE(p)	((p) << 3)
 480#define ISOC_OUT_EP	1
 481#define BULK_OUT_EP	2
 482#define INT_OUT_EP	3
 483#define CTRL_EP		4
 484#define ISOC_IN_EP	5
 485#define BULK_IN_EP	6
 486#define INT_IN_EP	7
 487/* bit 6 reserved */
 488/* bit 7 is Host Initiate Disable - for disabling stream selection */
 489#define MAX_BURST(p)	(((p)&0xff) << 8)
 490#define CTX_TO_MAX_BURST(p)	(((p) >> 8) & 0xff)
 491#define MAX_PACKET(p)	(((p)&0xffff) << 16)
 492#define MAX_PACKET_MASK		(0xffff << 16)
 493#define MAX_PACKET_DECODED(p)	(((p) >> 16) & 0xffff)
 494
 495/* tx_info bitmasks */
 496#define EP_AVG_TRB_LENGTH(p)		((p) & 0xffff)
 497#define EP_MAX_ESIT_PAYLOAD_LO(p)	(((p) & 0xffff) << 16)
 498#define EP_MAX_ESIT_PAYLOAD_HI(p)	((((p) >> 16) & 0xff) << 24)
 499#define CTX_TO_MAX_ESIT_PAYLOAD(p)	(((p) >> 16) & 0xffff)
 500
 501/* deq bitmasks */
 502#define EP_CTX_CYCLE_MASK		(1 << 0)
 503#define SCTX_DEQ_MASK			(~0xfL)
 504
 505
 506/**
 507 * struct xhci_input_control_context
 508 * Input control context; see section 6.2.5.
 509 *
 510 * @drop_context:	set the bit of the endpoint context you want to disable
 511 * @add_context:	set the bit of the endpoint context you want to enable
 512 */
 513struct xhci_input_control_ctx {
 514	__le32	drop_flags;
 515	__le32	add_flags;
 516	__le32	rsvd2[6];
 517};
 518
 519#define	EP_IS_ADDED(ctrl_ctx, i) \
 520	(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))
 521#define	EP_IS_DROPPED(ctrl_ctx, i)       \
 522	(le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1)))
 523
 524/* Represents everything that is needed to issue a command on the command ring.
 525 * It's useful to pre-allocate these for commands that cannot fail due to
 526 * out-of-memory errors, like freeing streams.
 527 */
 528struct xhci_command {
 529	/* Input context for changing device state */
 530	struct xhci_container_ctx	*in_ctx;
 531	u32				status;
 532	int				slot_id;
 533	/* If completion is null, no one is waiting on this command
 534	 * and the structure can be freed after the command completes.
 535	 */
 536	struct completion		*completion;
 537	union xhci_trb			*command_trb;
 538	struct list_head		cmd_list;
 539	/* xHCI command response timeout in milliseconds */
 540	unsigned int			timeout_ms;
 541};
 542
 543/* drop context bitmasks */
 544#define	DROP_EP(x)	(0x1 << x)
 545/* add context bitmasks */
 546#define	ADD_EP(x)	(0x1 << x)
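/*
 * Editorial illustration (not part of xhci.h): flagging endpoint context 3
 * for addition and endpoint context 2 for removal in an input control
 * context, as described in section 6.2.5.  "ctrl_ctx" is assumed to come
 * from xhci_get_input_control_ctx().
 */
static inline void example_mark_ep_change(struct xhci_input_control_ctx *ctrl_ctx)
{
	ctrl_ctx->add_flags |= cpu_to_le32(ADD_EP(3));
	ctrl_ctx->drop_flags |= cpu_to_le32(DROP_EP(2));
}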
 547
 548struct xhci_stream_ctx {
 549	/* 64-bit stream ring address, cycle state, and stream type */
 550	__le64	stream_ring;
 551	/* offset 0x14 - 0x1f reserved for HC internal use */
 552	__le32	reserved[2];
 553};
 554
 555/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
 556#define	SCT_FOR_CTX(p)		(((p) & 0x7) << 1)
 557#define	CTX_TO_SCT(p)		(((p) >> 1) & 0x7)
 558/* Secondary stream array type, dequeue pointer is to a transfer ring */
 559#define	SCT_SEC_TR		0
 560/* Primary stream array type, dequeue pointer is to a transfer ring */
 561#define	SCT_PRI_TR		1
 562/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */
 563#define SCT_SSA_8		2
 564#define SCT_SSA_16		3
 565#define SCT_SSA_32		4
 566#define SCT_SSA_64		5
 567#define SCT_SSA_128		6
 568#define SCT_SSA_256		7
 569
 570/* Assume no secondary streams for now */
 571struct xhci_stream_info {
 572	struct xhci_ring		**stream_rings;
 573	/* Number of streams, including stream 0 (which drivers can't use) */
 574	unsigned int			num_streams;
 575	/* The stream context array may be bigger than
 576	 * the number of streams the driver asked for
 577	 */
 578	struct xhci_stream_ctx		*stream_ctx_array;
 579	unsigned int			num_stream_ctxs;
 580	dma_addr_t			ctx_array_dma;
 581	/* For mapping physical TRB addresses to segments in stream rings */
 582	struct radix_tree_root		trb_address_map;
 583	struct xhci_command		*free_streams_command;
 584};
 585
 586#define	SMALL_STREAM_ARRAY_SIZE		256
 587#define	MEDIUM_STREAM_ARRAY_SIZE	1024
 588
 589/* Some Intel xHCI host controllers need software to keep track of the bus
 590 * bandwidth.  Keep track of endpoint info here.  Each root port is allocated
 591 * the full bus bandwidth.  We must also treat TTs (including each port under a
 592 * multi-TT hub) as a separate bandwidth domain.  The direct memory interface
 593 * (DMI) also limits the total bandwidth (across all domains) that can be used.
 594 */
 595struct xhci_bw_info {
 596	/* ep_interval is zero-based */
 597	unsigned int		ep_interval;
 598	/* mult and num_packets are one-based */
 599	unsigned int		mult;
 600	unsigned int		num_packets;
 601	unsigned int		max_packet_size;
 602	unsigned int		max_esit_payload;
 603	unsigned int		type;
 604};
 605
 606/* "Block" sizes in bytes the hardware uses for different device speeds.
 607 * The logic in this part of the hardware limits the number of bits the hardware
  608 * can use, so software must represent bandwidth in a less precise manner to mimic
 609 * the scheduler hardware computes.
 610 */
 611#define	FS_BLOCK	1
 612#define	HS_BLOCK	4
 613#define	SS_BLOCK	16
 614#define	DMI_BLOCK	32
 615
 616/* Each device speed has a protocol overhead (CRC, bit stuffing, etc) associated
 617 * with each byte transferred.  SuperSpeed devices have an initial overhead to
 618 * set up bursts.  These are in blocks, see above.  LS overhead has already been
 619 * translated into FS blocks.
 620 */
 621#define DMI_OVERHEAD 8
 622#define DMI_OVERHEAD_BURST 4
 623#define SS_OVERHEAD 8
 624#define SS_OVERHEAD_BURST 32
 625#define HS_OVERHEAD 26
 626#define FS_OVERHEAD 20
 627#define LS_OVERHEAD 128
 628/* The TTs need to claim roughly twice as much bandwidth (94 bytes per
 629 * microframe ~= 24Mbps) of the HS bus as the devices can actually use because
 630 * of overhead associated with split transfers crossing microframe boundaries.
 631 * 31 blocks is pure protocol overhead.
 632 */
 633#define TT_HS_OVERHEAD (31 + 94)
 634#define TT_DMI_OVERHEAD (25 + 12)
 635
 636/* Bandwidth limits in blocks */
 637#define FS_BW_LIMIT		1285
 638#define TT_BW_LIMIT		1320
 639#define HS_BW_LIMIT		1607
 640#define SS_BW_LIMIT_IN		3906
 641#define DMI_BW_LIMIT_IN		3906
 642#define SS_BW_LIMIT_OUT		3906
 643#define DMI_BW_LIMIT_OUT	3906
 644
 645/* Percentage of bus bandwidth reserved for non-periodic transfers */
 646#define FS_BW_RESERVED		10
 647#define HS_BW_RESERVED		20
 648#define SS_BW_RESERVED		10
 649
 650struct xhci_virt_ep {
 651	struct xhci_virt_device		*vdev;	/* parent */
 652	unsigned int			ep_index;
 653	struct xhci_ring		*ring;
 654	/* Related to endpoints that are configured to use stream IDs only */
 655	struct xhci_stream_info		*stream_info;
 656	/* Temporary storage in case the configure endpoint command fails and we
 657	 * have to restore the device state to the previous state
 658	 */
 659	struct xhci_ring		*new_ring;
 660	unsigned int			err_count;
 661	unsigned int			ep_state;
 662#define SET_DEQ_PENDING		(1 << 0)
 663#define EP_HALTED		(1 << 1)	/* For stall handling */
 664#define EP_STOP_CMD_PENDING	(1 << 2)	/* For URB cancellation */
 665/* Transitioning the endpoint to using streams, don't enqueue URBs */
 666#define EP_GETTING_STREAMS	(1 << 3)
 667#define EP_HAS_STREAMS		(1 << 4)
 668/* Transitioning the endpoint to not using streams, don't enqueue URBs */
 669#define EP_GETTING_NO_STREAMS	(1 << 5)
 670#define EP_HARD_CLEAR_TOGGLE	(1 << 6)
 671#define EP_SOFT_CLEAR_TOGGLE	(1 << 7)
 672/* usb_hub_clear_tt_buffer is in progress */
 673#define EP_CLEARING_TT		(1 << 8)
 674	/* ----  Related to URB cancellation ---- */
 675	struct list_head	cancelled_td_list;
 676	struct xhci_hcd		*xhci;
 677	/* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue
 678	 * command.  We'll need to update the ring's dequeue segment and dequeue
 679	 * pointer after the command completes.
 680	 */
 681	struct xhci_segment	*queued_deq_seg;
 682	union xhci_trb		*queued_deq_ptr;
 683	/*
  684	 * Sometimes the xHC cannot process the isochronous endpoint ring quickly
  685	 * enough, so it misses some isoc TDs on the ring and generates
  686	 * a Missed Service Error Event.
  687	 * Set the skip flag when a Missed Service Error Event is received, and
  688	 * process the missed TDs on the endpoint ring.
 689	 */
 690	bool			skip;
 691	/* Bandwidth checking storage */
 692	struct xhci_bw_info	bw_info;
 693	struct list_head	bw_endpoint_list;
 694	unsigned long		stop_time;
 695	/* Isoch Frame ID checking storage */
 696	int			next_frame_id;
 697	/* Use new Isoch TRB layout needed for extended TBC support */
 698	bool			use_extended_tbc;
 699};
 700
 701enum xhci_overhead_type {
 702	LS_OVERHEAD_TYPE = 0,
 703	FS_OVERHEAD_TYPE,
 704	HS_OVERHEAD_TYPE,
 705};
 706
 707struct xhci_interval_bw {
 708	unsigned int		num_packets;
 709	/* Sorted by max packet size.
 710	 * Head of the list is the greatest max packet size.
 711	 */
 712	struct list_head	endpoints;
 713	/* How many endpoints of each speed are present. */
 714	unsigned int		overhead[3];
 715};
 716
 717#define	XHCI_MAX_INTERVAL	16
 718
 719struct xhci_interval_bw_table {
 720	unsigned int		interval0_esit_payload;
 721	struct xhci_interval_bw	interval_bw[XHCI_MAX_INTERVAL];
 722	/* Includes reserved bandwidth for async endpoints */
 723	unsigned int		bw_used;
 724	unsigned int		ss_bw_in;
 725	unsigned int		ss_bw_out;
 726};
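/*
 * Editorial sketch (not the driver's actual check): expressing how much of
 * a high-speed root port's schedulable budget is consumed, assuming
 * bw_table->bw_used is kept in blocks, and comparing it against the
 * reserved share defined above.
 */
static inline bool example_hs_port_over_budget(struct xhci_interval_bw_table *bw_table)
{
	unsigned int percent_used = bw_table->bw_used * 100 / HS_BW_LIMIT;

	/* Keep HS_BW_RESERVED percent free for bulk and control transfers */
	return percent_used > (100 - HS_BW_RESERVED);
}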
 727
 728#define EP_CTX_PER_DEV		31
 729
 730struct xhci_virt_device {
 731	int				slot_id;
 732	struct usb_device		*udev;
 733	/*
 734	 * Commands to the hardware are passed an "input context" that
 735	 * tells the hardware what to change in its data structures.
 736	 * The hardware will return changes in an "output context" that
 737	 * software must allocate for the hardware.  We need to keep
 738	 * track of input and output contexts separately because
 739	 * these commands might fail and we don't trust the hardware.
 740	 */
 741	struct xhci_container_ctx       *out_ctx;
 742	/* Used for addressing devices and configuration changes */
 743	struct xhci_container_ctx       *in_ctx;
 744	struct xhci_virt_ep		eps[EP_CTX_PER_DEV];
 745	struct xhci_port		*rhub_port;
 746	struct xhci_interval_bw_table	*bw_table;
 747	struct xhci_tt_bw_info		*tt_info;
 748	/*
 749	 * flags for state tracking based on events and issued commands.
  750	 * Software cannot rely on states from output contexts because of
 751	 * latency between events and xHC updating output context values.
 752	 * See xhci 1.1 section 4.8.3 for more details
 753	 */
 754	unsigned long			flags;
 755#define VDEV_PORT_ERROR			BIT(0) /* Port error, link inactive */
 756
 757	/* The current max exit latency for the enabled USB3 link states. */
 758	u16				current_mel;
 759	/* Used for the debugfs interfaces. */
 760	void				*debugfs_private;
 761};
 762
 763/*
 764 * For each roothub, keep track of the bandwidth information for each periodic
 765 * interval.
 766 *
 767 * If a high speed hub is attached to the roothub, each TT associated with that
 768 * hub is a separate bandwidth domain.  The interval information for the
 769 * endpoints on the devices under that TT will appear in the TT structure.
 770 */
 771struct xhci_root_port_bw_info {
 772	struct list_head		tts;
 773	unsigned int			num_active_tts;
 774	struct xhci_interval_bw_table	bw_table;
 775};
 776
 777struct xhci_tt_bw_info {
 778	struct list_head		tt_list;
 779	int				slot_id;
 780	int				ttport;
 781	struct xhci_interval_bw_table	bw_table;
 782	int				active_eps;
 783};
 784
 785
 786/**
 787 * struct xhci_device_context_array
  788 * @dev_context_ptrs:	array of 64-bit DMA addresses for the device contexts
 789 */
 790struct xhci_device_context_array {
 791	/* 64-bit device addresses; we only write 32-bit addresses */
 792	__le64			dev_context_ptrs[MAX_HC_SLOTS];
 793	/* private xHCD pointers */
 794	dma_addr_t	dma;
 795};
 796/* TODO: write function to set the 64-bit device DMA address */
 797/*
 798 * TODO: change this to be dynamically sized at HC mem init time since the HC
 799 * might not be able to handle the maximum number of devices possible.
 800 */
 801
 802
 803struct xhci_transfer_event {
 804	/* 64-bit buffer address, or immediate data */
 805	__le64	buffer;
 806	__le32	transfer_len;
 807	/* This field is interpreted differently based on the type of TRB */
 808	__le32	flags;
 809};
 810
 811/* Transfer event flags bitfield, also for select command completion events */
 812#define TRB_TO_SLOT_ID(p)	(((p) >> 24) & 0xff)
 813#define SLOT_ID_FOR_TRB(p)	(((p) & 0xff) << 24)
 814
 815#define TRB_TO_EP_ID(p)		(((p) >> 16) & 0x1f) /* Endpoint ID 1 - 31 */
 816#define EP_ID_FOR_TRB(p)	(((p) & 0x1f) << 16)
 817
 818#define TRB_TO_EP_INDEX(p)	(TRB_TO_EP_ID(p) - 1) /* Endpoint index 0 - 30 */
 819#define EP_INDEX_FOR_TRB(p)	((((p) + 1) & 0x1f) << 16)
 820
 821/* Transfer event TRB length bit mask */
 
 822#define	EVENT_TRB_LEN(p)		((p) & 0xffffff)
 823
 824/* Completion Code - only applicable for some types of TRBs */
 825#define	COMP_CODE_MASK		(0xff << 24)
 826#define GET_COMP_CODE(p)	(((p) & COMP_CODE_MASK) >> 24)
 827#define COMP_INVALID				0
 828#define COMP_SUCCESS				1
 829#define COMP_DATA_BUFFER_ERROR			2
 830#define COMP_BABBLE_DETECTED_ERROR		3
 831#define COMP_USB_TRANSACTION_ERROR		4
 832#define COMP_TRB_ERROR				5
 833#define COMP_STALL_ERROR			6
 834#define COMP_RESOURCE_ERROR			7
 835#define COMP_BANDWIDTH_ERROR			8
 836#define COMP_NO_SLOTS_AVAILABLE_ERROR		9
 837#define COMP_INVALID_STREAM_TYPE_ERROR		10
 838#define COMP_SLOT_NOT_ENABLED_ERROR		11
 839#define COMP_ENDPOINT_NOT_ENABLED_ERROR		12
 840#define COMP_SHORT_PACKET			13
 841#define COMP_RING_UNDERRUN			14
 842#define COMP_RING_OVERRUN			15
 843#define COMP_VF_EVENT_RING_FULL_ERROR		16
 844#define COMP_PARAMETER_ERROR			17
 845#define COMP_BANDWIDTH_OVERRUN_ERROR		18
 846#define COMP_CONTEXT_STATE_ERROR		19
 847#define COMP_NO_PING_RESPONSE_ERROR		20
 848#define COMP_EVENT_RING_FULL_ERROR		21
 849#define COMP_INCOMPATIBLE_DEVICE_ERROR		22
 850#define COMP_MISSED_SERVICE_ERROR		23
 851#define COMP_COMMAND_RING_STOPPED		24
 852#define COMP_COMMAND_ABORTED			25
 853#define COMP_STOPPED				26
 854#define COMP_STOPPED_LENGTH_INVALID		27
 855#define COMP_STOPPED_SHORT_PACKET		28
 856#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR	29
 857#define COMP_ISOCH_BUFFER_OVERRUN		31
 858#define COMP_EVENT_LOST_ERROR			32
 859#define COMP_UNDEFINED_ERROR			33
 860#define COMP_INVALID_STREAM_ID_ERROR		34
 861#define COMP_SECONDARY_BANDWIDTH_ERROR		35
 862#define COMP_SPLIT_TRANSACTION_ERROR		36
 863
 864static inline const char *xhci_trb_comp_code_string(u8 status)
 865{
 866	switch (status) {
 867	case COMP_INVALID:
 868		return "Invalid";
 869	case COMP_SUCCESS:
 870		return "Success";
 871	case COMP_DATA_BUFFER_ERROR:
 872		return "Data Buffer Error";
 873	case COMP_BABBLE_DETECTED_ERROR:
 874		return "Babble Detected";
 875	case COMP_USB_TRANSACTION_ERROR:
 876		return "USB Transaction Error";
 877	case COMP_TRB_ERROR:
 878		return "TRB Error";
 879	case COMP_STALL_ERROR:
 880		return "Stall Error";
 881	case COMP_RESOURCE_ERROR:
 882		return "Resource Error";
 883	case COMP_BANDWIDTH_ERROR:
 884		return "Bandwidth Error";
 885	case COMP_NO_SLOTS_AVAILABLE_ERROR:
 886		return "No Slots Available Error";
 887	case COMP_INVALID_STREAM_TYPE_ERROR:
 888		return "Invalid Stream Type Error";
 889	case COMP_SLOT_NOT_ENABLED_ERROR:
 890		return "Slot Not Enabled Error";
 891	case COMP_ENDPOINT_NOT_ENABLED_ERROR:
 892		return "Endpoint Not Enabled Error";
 893	case COMP_SHORT_PACKET:
 894		return "Short Packet";
 895	case COMP_RING_UNDERRUN:
 896		return "Ring Underrun";
 897	case COMP_RING_OVERRUN:
 898		return "Ring Overrun";
 899	case COMP_VF_EVENT_RING_FULL_ERROR:
 900		return "VF Event Ring Full Error";
 901	case COMP_PARAMETER_ERROR:
 902		return "Parameter Error";
 903	case COMP_BANDWIDTH_OVERRUN_ERROR:
 904		return "Bandwidth Overrun Error";
 905	case COMP_CONTEXT_STATE_ERROR:
 906		return "Context State Error";
 907	case COMP_NO_PING_RESPONSE_ERROR:
 908		return "No Ping Response Error";
 909	case COMP_EVENT_RING_FULL_ERROR:
 910		return "Event Ring Full Error";
 911	case COMP_INCOMPATIBLE_DEVICE_ERROR:
 912		return "Incompatible Device Error";
 913	case COMP_MISSED_SERVICE_ERROR:
 914		return "Missed Service Error";
 915	case COMP_COMMAND_RING_STOPPED:
 916		return "Command Ring Stopped";
 917	case COMP_COMMAND_ABORTED:
 918		return "Command Aborted";
 919	case COMP_STOPPED:
 920		return "Stopped";
 921	case COMP_STOPPED_LENGTH_INVALID:
 922		return "Stopped - Length Invalid";
 923	case COMP_STOPPED_SHORT_PACKET:
 924		return "Stopped - Short Packet";
 925	case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
 926		return "Max Exit Latency Too Large Error";
 927	case COMP_ISOCH_BUFFER_OVERRUN:
 928		return "Isoch Buffer Overrun";
 929	case COMP_EVENT_LOST_ERROR:
 930		return "Event Lost Error";
 931	case COMP_UNDEFINED_ERROR:
 932		return "Undefined Error";
 933	case COMP_INVALID_STREAM_ID_ERROR:
 934		return "Invalid Stream ID Error";
 935	case COMP_SECONDARY_BANDWIDTH_ERROR:
 936		return "Secondary Bandwidth Error";
 937	case COMP_SPLIT_TRANSACTION_ERROR:
 938		return "Split Transaction Error";
 939	default:
 940		return "Unknown!!";
 941	}
 942}
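/*
 * Editorial illustration (not part of xhci.h): decoding the completion
 * code of a received transfer event with the masks above and turning it
 * into a human-readable string.
 */
static inline const char *example_transfer_event_status(struct xhci_transfer_event *event)
{
	u32 comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	return xhci_trb_comp_code_string(comp_code);
}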
 943
 944struct xhci_link_trb {
 945	/* 64-bit segment pointer*/
 946	__le64 segment_ptr;
 947	__le32 intr_target;
 948	__le32 control;
 949};
 950
 951/* control bitfields */
 952#define LINK_TOGGLE	(0x1<<1)
 953
 954/* Command completion event TRB */
 955struct xhci_event_cmd {
 956	/* Pointer to command TRB, or the value passed by the event data trb */
 957	__le64 cmd_trb;
 958	__le32 status;
 959	__le32 flags;
 960};
 961
 
 
 962/* Address device - disable SetAddress */
 963#define TRB_BSR		(1<<9)
 964
 965/* Configure Endpoint - Deconfigure */
 966#define TRB_DC		(1<<9)
 967
 968/* Stop Ring - Transfer State Preserve */
 969#define TRB_TSP		(1<<9)
 970
 971enum xhci_ep_reset_type {
 972	EP_HARD_RESET,
 973	EP_SOFT_RESET,
 974};
 975
 976/* Force Event */
 977#define TRB_TO_VF_INTR_TARGET(p)	(((p) & (0x3ff << 22)) >> 22)
 978#define TRB_TO_VF_ID(p)			(((p) & (0xff << 16)) >> 16)
 979
 980/* Set Latency Tolerance Value */
 981#define TRB_TO_BELT(p)			(((p) & (0xfff << 16)) >> 16)
 982
 983/* Get Port Bandwidth */
 984#define TRB_TO_DEV_SPEED(p)		(((p) & (0xf << 16)) >> 16)
 985
 986/* Force Header */
 987#define TRB_TO_PACKET_TYPE(p)		((p) & 0x1f)
 988#define TRB_TO_ROOTHUB_PORT(p)		(((p) & (0xff << 24)) >> 24)
 989
 990enum xhci_setup_dev {
 991	SETUP_CONTEXT_ONLY,
 992	SETUP_CONTEXT_ADDRESS,
 993};
 994
 995/* bits 16:23 are the virtual function ID */
 996/* bits 24:31 are the slot ID */
 
 
 997
 998/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
 999#define SUSPEND_PORT_FOR_TRB(p)		(((p) & 1) << 23)
1000#define TRB_TO_SUSPEND_PORT(p)		(((p) & (1 << 23)) >> 23)
1001#define LAST_EP_INDEX			30
1002
1003/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
1004#define TRB_TO_STREAM_ID(p)		((((p) & (0xffff << 16)) >> 16))
1005#define STREAM_ID_FOR_TRB(p)		((((p)) & 0xffff) << 16)
1006#define SCT_FOR_TRB(p)			(((p) & 0x7) << 1)
1007
1008/* Link TRB specific fields */
1009#define TRB_TC			(1<<1)
1010
1011/* Port Status Change Event TRB fields */
1012/* Port ID - bits 31:24 */
1013#define GET_PORT_ID(p)		(((p) & (0xff << 24)) >> 24)
1014
1015#define EVENT_DATA		(1 << 2)
1016
1017/* Normal TRB fields */
1018/* transfer_len bitmasks - bits 0:16 */
1019#define	TRB_LEN(p)		((p) & 0x1ffff)
1020/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */
1021#define TRB_TD_SIZE(p)          (min((p), (u32)31) << 17)
1022#define GET_TD_SIZE(p)		(((p) & 0x3e0000) >> 17)
1023/* xhci 1.1 uses the TD_SIZE field for TBC if Extended TBC is enabled (ETE) */
1024#define TRB_TD_SIZE_TBC(p)      (min((p), (u32)31) << 17)
1025/* Interrupter Target - which MSI-X vector to target the completion event at */
1026#define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
1027#define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
1028
1029/* Cycle bit - indicates TRB ownership by HC or HCD */
1030#define TRB_CYCLE		(1<<0)
1031/*
1032 * Force next event data TRB to be evaluated before task switch.
1033 * Used to pass OS data back after a TD completes.
1034 */
1035#define TRB_ENT			(1<<1)
1036/* Interrupt on short packet */
1037#define TRB_ISP			(1<<2)
1038/* Set PCIe no snoop attribute */
1039#define TRB_NO_SNOOP		(1<<3)
1040/* Chain multiple TRBs into a TD */
1041#define TRB_CHAIN		(1<<4)
1042/* Interrupt on completion */
1043#define TRB_IOC			(1<<5)
1044/* The buffer pointer contains immediate data */
1045#define TRB_IDT			(1<<6)
1046/* TDs smaller than this might use IDT */
1047#define TRB_IDT_MAX_SIZE	8
1048
1049/* Block Event Interrupt */
1050#define	TRB_BEI			(1<<9)
1051
1052/* Control transfer TRB specific fields */
1053#define TRB_DIR_IN		(1<<16)
1054#define	TRB_TX_TYPE(p)		((p) << 16)
1055#define	TRB_DATA_OUT		2
1056#define	TRB_DATA_IN		3
1057
1058/* Isochronous TRB specific fields */
1059#define TRB_SIA			(1<<31)
1060#define TRB_FRAME_ID(p)		(((p) & 0x7ff) << 20)
1061#define GET_FRAME_ID(p)		(((p) >> 20) & 0x7ff)
1062/* Total burst count field, Rsvdz on xhci 1.1 with Extended TBC enabled (ETE) */
1063#define TRB_TBC(p)		(((p) & 0x3) << 7)
1064#define GET_TBC(p)		(((p) >> 7) & 0x3)
1065#define TRB_TLBPC(p)		(((p) & 0xf) << 16)
1066#define GET_TLBPC(p)		(((p) >> 16) & 0xf)
1067
1068/* TRB cache size for xHC with TRB cache */
1069#define TRB_CACHE_SIZE_HS	8
1070#define TRB_CACHE_SIZE_SS	16
1071
1072struct xhci_generic_trb {
1073	__le32 field[4];
1074};
1075
1076union xhci_trb {
1077	struct xhci_link_trb		link;
1078	struct xhci_transfer_event	trans_event;
1079	struct xhci_event_cmd		event_cmd;
1080	struct xhci_generic_trb		generic;
1081};
1082
1083/* TRB bit mask */
1084#define	TRB_TYPE_BITMASK	(0xfc00)
1085#define TRB_TYPE(p)		((p) << 10)
1086#define TRB_FIELD_TO_TYPE(p)	(((p) & TRB_TYPE_BITMASK) >> 10)
1087/* TRB type IDs */
1088/* bulk, interrupt, isoc scatter/gather, and control data stage */
1089#define TRB_NORMAL		1
1090/* setup stage for control transfers */
1091#define TRB_SETUP		2
1092/* data stage for control transfers */
1093#define TRB_DATA		3
1094/* status stage for control transfers */
1095#define TRB_STATUS		4
1096/* isoc transfers */
1097#define TRB_ISOC		5
1098/* TRB for linking ring segments */
1099#define TRB_LINK		6
1100#define TRB_EVENT_DATA		7
1101/* Transfer Ring No-op (not for the command ring) */
1102#define TRB_TR_NOOP		8
1103/* Command TRBs */
1104/* Enable Slot Command */
1105#define TRB_ENABLE_SLOT		9
1106/* Disable Slot Command */
1107#define TRB_DISABLE_SLOT	10
1108/* Address Device Command */
1109#define TRB_ADDR_DEV		11
1110/* Configure Endpoint Command */
1111#define TRB_CONFIG_EP		12
1112/* Evaluate Context Command */
1113#define TRB_EVAL_CONTEXT	13
1114/* Reset Endpoint Command */
1115#define TRB_RESET_EP		14
1116/* Stop Transfer Ring Command */
1117#define TRB_STOP_RING		15
1118/* Set Transfer Ring Dequeue Pointer Command */
1119#define TRB_SET_DEQ		16
1120/* Reset Device Command */
1121#define TRB_RESET_DEV		17
1122/* Force Event Command (opt) */
1123#define TRB_FORCE_EVENT		18
1124/* Negotiate Bandwidth Command (opt) */
1125#define TRB_NEG_BANDWIDTH	19
1126/* Set Latency Tolerance Value Command (opt) */
1127#define TRB_SET_LT		20
1128/* Get port bandwidth Command */
1129#define TRB_GET_BW		21
1130/* Force Header Command - generate a transaction or link management packet */
1131#define TRB_FORCE_HEADER	22
1132/* No-op Command - not for transfer rings */
1133#define TRB_CMD_NOOP		23
1134/* TRB IDs 24-31 reserved */
1135/* Event TRBS */
1136/* Transfer Event */
1137#define TRB_TRANSFER		32
1138/* Command Completion Event */
1139#define TRB_COMPLETION		33
1140/* Port Status Change Event */
1141#define TRB_PORT_STATUS		34
1142/* Bandwidth Request Event (opt) */
1143#define TRB_BANDWIDTH_EVENT	35
1144/* Doorbell Event (opt) */
1145#define TRB_DOORBELL		36
1146/* Host Controller Event */
1147#define TRB_HC_EVENT		37
1148/* Device Notification Event - device sent function wake notification */
1149#define TRB_DEV_NOTE		38
1150/* MFINDEX Wrap Event - microframe counter wrapped */
1151#define TRB_MFINDEX_WRAP	39
1152/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
1153#define TRB_VENDOR_DEFINED_LOW	48
 1154/* NEC vendor-specific command completion event. */
1155#define	TRB_NEC_CMD_COMP	48
1156/* Get NEC firmware revision. */
1157#define	TRB_NEC_GET_FW		49
1158
1159static inline const char *xhci_trb_type_string(u8 type)
1160{
1161	switch (type) {
1162	case TRB_NORMAL:
1163		return "Normal";
1164	case TRB_SETUP:
1165		return "Setup Stage";
1166	case TRB_DATA:
1167		return "Data Stage";
1168	case TRB_STATUS:
1169		return "Status Stage";
1170	case TRB_ISOC:
1171		return "Isoch";
1172	case TRB_LINK:
1173		return "Link";
1174	case TRB_EVENT_DATA:
1175		return "Event Data";
1176	case TRB_TR_NOOP:
1177		return "No-Op";
1178	case TRB_ENABLE_SLOT:
1179		return "Enable Slot Command";
1180	case TRB_DISABLE_SLOT:
1181		return "Disable Slot Command";
1182	case TRB_ADDR_DEV:
1183		return "Address Device Command";
1184	case TRB_CONFIG_EP:
1185		return "Configure Endpoint Command";
1186	case TRB_EVAL_CONTEXT:
1187		return "Evaluate Context Command";
1188	case TRB_RESET_EP:
1189		return "Reset Endpoint Command";
1190	case TRB_STOP_RING:
1191		return "Stop Ring Command";
1192	case TRB_SET_DEQ:
1193		return "Set TR Dequeue Pointer Command";
1194	case TRB_RESET_DEV:
1195		return "Reset Device Command";
1196	case TRB_FORCE_EVENT:
1197		return "Force Event Command";
1198	case TRB_NEG_BANDWIDTH:
1199		return "Negotiate Bandwidth Command";
1200	case TRB_SET_LT:
1201		return "Set Latency Tolerance Value Command";
1202	case TRB_GET_BW:
1203		return "Get Port Bandwidth Command";
1204	case TRB_FORCE_HEADER:
1205		return "Force Header Command";
1206	case TRB_CMD_NOOP:
1207		return "No-Op Command";
1208	case TRB_TRANSFER:
1209		return "Transfer Event";
1210	case TRB_COMPLETION:
1211		return "Command Completion Event";
1212	case TRB_PORT_STATUS:
1213		return "Port Status Change Event";
1214	case TRB_BANDWIDTH_EVENT:
1215		return "Bandwidth Request Event";
1216	case TRB_DOORBELL:
1217		return "Doorbell Event";
1218	case TRB_HC_EVENT:
1219		return "Host Controller Event";
1220	case TRB_DEV_NOTE:
1221		return "Device Notification Event";
1222	case TRB_MFINDEX_WRAP:
1223		return "MFINDEX Wrap Event";
1224	case TRB_NEC_CMD_COMP:
1225		return "NEC Command Completion Event";
1226	case TRB_NEC_GET_FW:
 1227		return "NEC Get Firmware Revision Command";
1228	default:
1229		return "UNKNOWN";
1230	}
1231}
1232
1233#define TRB_TYPE_LINK(x)	(((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
1234/* Above, but for __le32 types -- can avoid work by swapping constants: */
1235#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
1236				 cpu_to_le32(TRB_TYPE(TRB_LINK)))
1237#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
1238				 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
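/*
 * Illustrative sketch, not part of the driver API (the helper name is made
 * up): the swapped-constant macros above let a TRB control word be tested
 * for the Link type without byte-swapping the hardware value at run time.
 */
static inline bool xhci_example_trb_is_link(const union xhci_trb *trb)
{
	/* The constant side is converted at compile time; the field stays __le32 */
	return TRB_TYPE_LINK_LE32(trb->link.control);
}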
1239
1240#define NEC_FW_MINOR(p)		(((p) >> 0) & 0xff)
1241#define NEC_FW_MAJOR(p)		(((p) >> 8) & 0xff)
1242
1243/*
1244 * TRBS_PER_SEGMENT must be a multiple of 4,
1245 * since the command ring is 64-byte aligned.
1246 * It must also be greater than 16.
1247 */
1248#define TRBS_PER_SEGMENT	256
1249/* Allow two commands + a link TRB, along with any reserved command TRBs */
1250#define MAX_RSVD_CMD_TRBS	(TRBS_PER_SEGMENT - 3)
1251#define TRB_SEGMENT_SIZE	(TRBS_PER_SEGMENT*16)
1252#define TRB_SEGMENT_SHIFT	(ilog2(TRB_SEGMENT_SIZE))
1253/* TRB buffer pointers can't cross 64KB boundaries */
1254#define TRB_MAX_BUFF_SHIFT		16
1255#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
1256/* How much data is left before the 64KB boundary? */
1257#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr)	(TRB_MAX_BUFF_SIZE - \
1258					(addr & (TRB_MAX_BUFF_SIZE - 1)))
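/*
 * Illustrative sketch (hypothetical helper, not the driver's own code):
 * splitting a contiguous buffer into chunks so that no single TRB crosses
 * a 64KB boundary.  The driver's count_trbs() helper, declared further
 * below, performs a similar calculation.
 */
static inline unsigned int xhci_example_trbs_for_buffer(u64 addr, u64 len)
{
	unsigned int num_trbs = 0;

	while (len) {
		/* Cover at most the bytes left before the next 64KB boundary */
		u64 chunk = min_t(u64, len, TRB_BUFF_LEN_UP_TO_BOUNDARY(addr));

		addr += chunk;
		len -= chunk;
		num_trbs++;
	}
	return num_trbs;
}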
1259#define MAX_SOFT_RETRY		3
1260/*
1261 * Limits of consecutive isoc trbs that can Block Event Interrupt (BEI) if
1262 * XHCI_AVOID_BEI quirk is in use.
1263 */
1264#define AVOID_BEI_INTERVAL_MIN	8
1265#define AVOID_BEI_INTERVAL_MAX	32
1266
1267#define xhci_for_each_ring_seg(head, seg) \
1268	for (seg = head; seg != NULL; seg = (seg->next != head ? seg->next : NULL))
1269
1270struct xhci_segment {
1271	union xhci_trb		*trbs;
1272	/* private to HCD */
1273	struct xhci_segment	*next;
1274	unsigned int		num;
1275	dma_addr_t		dma;
 1276	/* Max packet sized bounce buffer for td-fragment alignment */
1277	dma_addr_t		bounce_dma;
1278	void			*bounce_buf;
1279	unsigned int		bounce_offs;
1280	unsigned int		bounce_len;
1281};
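/*
 * Usage sketch for the xhci_for_each_ring_seg() iterator defined above
 * (hypothetical function, for illustration only): every segment of the
 * circular segment list is visited exactly once, starting from head.
 */
static inline unsigned int xhci_example_count_segments(struct xhci_segment *head)
{
	struct xhci_segment *seg;
	unsigned int count = 0;

	xhci_for_each_ring_seg(head, seg)
		count++;

	return count;
}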
1282
1283enum xhci_cancelled_td_status {
1284	TD_DIRTY = 0,
1285	TD_HALTED,
1286	TD_CLEARING_CACHE,
1287	TD_CLEARING_CACHE_DEFERRED,
1288	TD_CLEARED,
1289};
1290
1291struct xhci_td {
1292	struct list_head	td_list;
1293	struct list_head	cancelled_td_list;
1294	int			status;
1295	enum xhci_cancelled_td_status	cancel_status;
1296	struct urb		*urb;
1297	struct xhci_segment	*start_seg;
1298	union xhci_trb		*start_trb;
1299	struct xhci_segment	*end_seg;
1300	union xhci_trb		*end_trb;
1301	struct xhci_segment	*bounce_seg;
1302	/* actual_length of the URB has already been set */
1303	bool			urb_length_set;
1304	bool			error_mid_td;
1305};
1306
1307/*
1308 * xHCI command default timeout value in milliseconds.
1309 * USB 3.2 spec, section 9.2.6.1
1310 */
1311#define XHCI_CMD_DEFAULT_TIMEOUT	5000
1312
1313/* command descriptor */
1314struct xhci_cd {
 
1315	struct xhci_command	*command;
1316	union xhci_trb		*cmd_trb;
1317};
1318
1319enum xhci_ring_type {
1320	TYPE_CTRL = 0,
1321	TYPE_ISOC,
1322	TYPE_BULK,
1323	TYPE_INTR,
1324	TYPE_STREAM,
1325	TYPE_COMMAND,
1326	TYPE_EVENT,
1327};
1328
1329static inline const char *xhci_ring_type_string(enum xhci_ring_type type)
1330{
1331	switch (type) {
1332	case TYPE_CTRL:
1333		return "CTRL";
1334	case TYPE_ISOC:
1335		return "ISOC";
1336	case TYPE_BULK:
1337		return "BULK";
1338	case TYPE_INTR:
1339		return "INTR";
1340	case TYPE_STREAM:
1341		return "STREAM";
1342	case TYPE_COMMAND:
1343		return "CMD";
1344	case TYPE_EVENT:
1345		return "EVENT";
1346	}
1347
1348	return "UNKNOWN";
1349}
1350
1351struct xhci_ring {
1352	struct xhci_segment	*first_seg;
1353	struct xhci_segment	*last_seg;
1354	union  xhci_trb		*enqueue;
1355	struct xhci_segment	*enq_seg;
 
1356	union  xhci_trb		*dequeue;
1357	struct xhci_segment	*deq_seg;
 
1358	struct list_head	td_list;
1359	/*
1360	 * Write the cycle state into the TRB cycle field to give ownership of
1361	 * the TRB to the host controller (if we are the producer), or to check
1362	 * if we own the TRB (if we are the consumer).  See section 4.9.1.
1363	 */
1364	u32			cycle_state;
1365	unsigned int		stream_id;
1366	unsigned int		num_segs;
1367	unsigned int		num_trbs_free; /* used only by xhci DbC */
1368	unsigned int		bounce_buf_len;
1369	enum xhci_ring_type	type;
1370	bool			last_td_was_short;
1371	struct radix_tree_root	*trb_address_map;
1372};
1373
1374struct xhci_erst_entry {
1375	/* 64-bit event ring segment address */
1376	__le64	seg_addr;
1377	__le32	seg_size;
1378	/* Set to zero */
1379	__le32	rsvd;
1380};
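/*
 * Illustrative sketch (hypothetical helper): filling one ERST entry for an
 * event ring segment.  Per the layout above, seg_addr takes the segment's
 * DMA address, seg_size the number of TRBs the segment holds, and the
 * reserved word is cleared.
 */
static inline void xhci_example_fill_erst_entry(struct xhci_erst_entry *entry,
						struct xhci_segment *seg)
{
	entry->seg_addr = cpu_to_le64(seg->dma);
	entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	entry->rsvd = cpu_to_le32(0);
}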
1381
1382struct xhci_erst {
1383	struct xhci_erst_entry	*entries;
1384	unsigned int		num_entries;
1385	/* xhci->event_ring keeps track of segment dma addresses */
1386	dma_addr_t		erst_dma_addr;
1387};
1388
1389struct xhci_scratchpad {
1390	u64 *sp_array;
1391	dma_addr_t sp_dma;
1392	void **sp_buffers;
 
1393};
1394
1395struct urb_priv {
1396	int	num_tds;
1397	int	num_tds_done;
1398	struct	xhci_td	td[] __counted_by(num_tds);
1399};
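/*
 * Illustrative allocation sketch (assumes <linux/slab.h> and
 * <linux/overflow.h> are available; the function name is made up): a
 * flexible-array struct like urb_priv is sized with struct_size() so the
 * trailing td[] array gets room for num_tds entries, which also keeps the
 * __counted_by() annotation honest.
 */
static inline struct urb_priv *xhci_example_alloc_urb_priv(int num_tds,
							   gfp_t mem_flags)
{
	struct urb_priv *urb_priv;

	urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
	if (!urb_priv)
		return NULL;

	urb_priv->num_tds = num_tds;
	return urb_priv;
}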
1400
 1401/* Number of Event Ring segments to allocate when the amount is not specified (spec allows up to 32k) */
1402#define	ERST_DEFAULT_SEGS	2
1403/* Poll every 60 seconds */
1404#define	POLL_TIMEOUT	60
1405/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
1406#define XHCI_STOP_EP_CMD_TIMEOUT	5
1407/* XXX: Make these module parameters */
1408
1409struct s3_save {
1410	u32	command;
1411	u32	dev_nt;
1412	u64	dcbaa_ptr;
1413	u32	config_reg;
1414};
1415
1416/* Use for lpm */
1417struct dev_info {
1418	u32			dev_id;
1419	struct	list_head	list;
1420};
1421
1422struct xhci_bus_state {
1423	unsigned long		bus_suspended;
1424	unsigned long		next_statechange;
1425
1426	/* Port suspend arrays are indexed by the portnum of the fake roothub */
1427	/* ports suspend status arrays - max 31 ports for USB2, 15 for USB3 */
1428	u32			port_c_suspend;
1429	u32			suspended_ports;
1430	u32			port_remote_wakeup;
 
1431	/* which ports have started to resume */
1432	unsigned long		resuming_ports;
1433};
1434
1435struct xhci_interrupter {
1436	struct xhci_ring	*event_ring;
1437	struct xhci_erst	erst;
1438	struct xhci_intr_reg __iomem *ir_set;
1439	unsigned int		intr_num;
1440	bool			ip_autoclear;
1441	u32			isoc_bei_interval;
1442	/* For interrupter registers save and restore over suspend/resume */
1443	u32	s3_irq_pending;
1444	u32	s3_irq_control;
1445	u32	s3_erst_size;
1446	u64	s3_erst_base;
1447	u64	s3_erst_dequeue;
1448};
1449/*
1450 * It can take up to 20 ms to transition from RExit to U0 on the
1451 * Intel Lynx Point LP xHCI host.
1452 */
1453#define	XHCI_MAX_REXIT_TIMEOUT_MS	20
1454struct xhci_port_cap {
1455	u32			*psi;	/* array of protocol speed ID entries */
1456	u8			psi_count;
1457	u8			psi_uid_count;
1458	u8			maj_rev;
1459	u8			min_rev;
1460	u32			protocol_caps;
1461};
1462
1463struct xhci_port {
1464	__le32 __iomem		*addr;
1465	int			hw_portnum;
1466	int			hcd_portnum;
1467	struct xhci_hub		*rhub;
1468	struct xhci_port_cap	*port_cap;
1469	unsigned int		lpm_incapable:1;
1470	unsigned long		resume_timestamp;
1471	bool			rexit_active;
1472	/* Slot ID is the index of the device directly connected to the port */
1473	int			slot_id;
1474	struct completion	rexit_done;
1475	struct completion	u3exit_done;
1476};
1477
1478struct xhci_hub {
1479	struct xhci_port	**ports;
1480	unsigned int		num_ports;
1481	struct usb_hcd		*hcd;
1482	/* keep track of bus suspend info */
1483	struct xhci_bus_state   bus_state;
 1484	/* supported protocol extended capability values */
1485	u8			maj_rev;
1486	u8			min_rev;
1487};
1488
1489/* There is one xhci_hcd structure per controller */
1490struct xhci_hcd {
1491	struct usb_hcd *main_hcd;
1492	struct usb_hcd *shared_hcd;
1493	/* glue to PCI and HCD framework */
1494	struct xhci_cap_regs __iomem *cap_regs;
1495	struct xhci_op_regs __iomem *op_regs;
1496	struct xhci_run_regs __iomem *run_regs;
1497	struct xhci_doorbell_array __iomem *dba;
1498
1499	/* Cached register copies of read-only HC data */
1500	__u32		hcs_params1;
1501	__u32		hcs_params2;
1502	__u32		hcs_params3;
1503	__u32		hcc_params;
1504	__u32		hcc_params2;
1505
1506	spinlock_t	lock;
1507
1508	/* packed release number */
 
1509	u16		hci_version;
1510	u16		max_interrupters;
1511	/* imod_interval in ns (I * 250ns) */
1512	u32		imod_interval;
1513	/* 4KB min, 128MB max */
1514	int		page_size;
1515	/* Valid values are 12 to 20, inclusive */
1516	int		page_shift;
1517	/* MSI-X/MSI vectors */
1518	int		nvecs;
1519	/* optional clocks */
1520	struct clk		*clk;
1521	struct clk		*reg_clk;
1522	/* optional reset controller */
1523	struct reset_control *reset;
1524	/* data structures */
1525	struct xhci_device_context_array *dcbaa;
1526	struct xhci_interrupter **interrupters;
1527	struct xhci_ring	*cmd_ring;
1528	unsigned int            cmd_ring_state;
1529#define CMD_RING_STATE_RUNNING         (1 << 0)
1530#define CMD_RING_STATE_ABORTED         (1 << 1)
1531#define CMD_RING_STATE_STOPPED         (1 << 2)
1532	struct list_head        cmd_list;
1533	unsigned int		cmd_ring_reserved_trbs;
1534	struct delayed_work	cmd_timer;
1535	struct completion	cmd_ring_stop_completion;
1536	struct xhci_command	*current_cmd;
1537
1538	/* Scratchpad */
1539	struct xhci_scratchpad  *scratchpad;
1540
1541	/* slot enabling and address device helpers */
1542	/* these are not thread safe so use mutex */
1543	struct mutex mutex;
1544	/* Internal mirror of the HW's dcbaa */
1545	struct xhci_virt_device	*devs[MAX_HC_SLOTS];
1546	/* For keeping track of bandwidth domains per roothub. */
1547	struct xhci_root_port_bw_info	*rh_bw;
1548
1549	/* DMA pools */
1550	struct dma_pool	*device_pool;
1551	struct dma_pool	*segment_pool;
1552	struct dma_pool	*small_streams_pool;
1553	struct dma_pool	*medium_streams_pool;
1554
1555	/* Host controller watchdog timer structures */
1556	unsigned int		xhc_state;
1557	unsigned long		run_graceperiod;
 
1558	struct s3_save		s3;
1559/* Host controller is dying - not responding to commands. "I'm not dead yet!"
1560 *
 1561 * xHC interrupts have been disabled and a watchdog timer will halt (or has
 1562 * already halted) the xHCI host and complete all URBs with an -ESHUTDOWN code.  Any code
1563 * that sees this status (other than the timer that set it) should stop touching
1564 * hardware immediately.  Interrupt handlers should return immediately when
1565 * they see this status (any time they drop and re-acquire xhci->lock).
1566 * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without
1567 * putting the TD on the canceled list, etc.
1568 *
1569 * There are no reports of xHCI host controllers that display this issue.
1570 */
1571#define XHCI_STATE_DYING	(1 << 0)
1572#define XHCI_STATE_HALTED	(1 << 1)
1573#define XHCI_STATE_REMOVING	(1 << 2)
1574	unsigned long long	quirks;
1575#define	XHCI_LINK_TRB_QUIRK	BIT_ULL(0)
1576#define XHCI_RESET_EP_QUIRK	BIT_ULL(1) /* Deprecated */
1577#define XHCI_NEC_HOST		BIT_ULL(2)
1578#define XHCI_AMD_PLL_FIX	BIT_ULL(3)
1579#define XHCI_SPURIOUS_SUCCESS	BIT_ULL(4)
 
1580/*
1581 * Certain Intel host controllers have a limit to the number of endpoint
1582 * contexts they can handle.  Ideally, they would signal that they can't handle
1583 * anymore endpoint contexts by returning a Resource Error for the Configure
1584 * Endpoint command, but they don't.  Instead they expect software to keep track
1585 * of the number of active endpoints for them, across configure endpoint
1586 * commands, reset device commands, disable slot commands, and address device
1587 * commands.
1588 */
1589#define XHCI_EP_LIMIT_QUIRK	BIT_ULL(5)
1590#define XHCI_BROKEN_MSI		BIT_ULL(6)
1591#define XHCI_RESET_ON_RESUME	BIT_ULL(7)
1592#define	XHCI_SW_BW_CHECKING	BIT_ULL(8)
1593#define XHCI_AMD_0x96_HOST	BIT_ULL(9)
1594#define XHCI_TRUST_TX_LENGTH	BIT_ULL(10) /* Deprecated */
1595#define XHCI_LPM_SUPPORT	BIT_ULL(11)
1596#define XHCI_INTEL_HOST		BIT_ULL(12)
1597#define XHCI_SPURIOUS_REBOOT	BIT_ULL(13)
1598#define XHCI_COMP_MODE_QUIRK	BIT_ULL(14)
1599#define XHCI_AVOID_BEI		BIT_ULL(15)
1600#define XHCI_PLAT		BIT_ULL(16) /* Deprecated */
1601#define XHCI_SLOW_SUSPEND	BIT_ULL(17)
1602#define XHCI_SPURIOUS_WAKEUP	BIT_ULL(18)
1603/* For controllers with a broken beyond repair streams implementation */
1604#define XHCI_BROKEN_STREAMS	BIT_ULL(19)
1605#define XHCI_PME_STUCK_QUIRK	BIT_ULL(20)
1606#define XHCI_MTK_HOST		BIT_ULL(21)
1607#define XHCI_SSIC_PORT_UNUSED	BIT_ULL(22)
1608#define XHCI_NO_64BIT_SUPPORT	BIT_ULL(23)
1609#define XHCI_MISSING_CAS	BIT_ULL(24)
1610/* For controller with a broken Port Disable implementation */
1611#define XHCI_BROKEN_PORT_PED	BIT_ULL(25)
1612#define XHCI_LIMIT_ENDPOINT_INTERVAL_7	BIT_ULL(26)
1613#define XHCI_U2_DISABLE_WAKE	BIT_ULL(27)
1614#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL	BIT_ULL(28)
1615#define XHCI_HW_LPM_DISABLE	BIT_ULL(29)
1616#define XHCI_SUSPEND_DELAY	BIT_ULL(30)
1617#define XHCI_INTEL_USB_ROLE_SW	BIT_ULL(31)
1618#define XHCI_ZERO_64B_REGS	BIT_ULL(32)
1619#define XHCI_DEFAULT_PM_RUNTIME_ALLOW	BIT_ULL(33)
1620#define XHCI_RESET_PLL_ON_DISCONNECT	BIT_ULL(34)
1621#define XHCI_SNPS_BROKEN_SUSPEND    BIT_ULL(35)
1622/* Reserved. It was XHCI_RENESAS_FW_QUIRK */
1623#define XHCI_SKIP_PHY_INIT	BIT_ULL(37)
1624#define XHCI_DISABLE_SPARSE	BIT_ULL(38)
1625#define XHCI_SG_TRB_CACHE_SIZE_QUIRK	BIT_ULL(39)
1626#define XHCI_NO_SOFT_RETRY	BIT_ULL(40)
1627#define XHCI_BROKEN_D3COLD_S2I	BIT_ULL(41)
1628#define XHCI_EP_CTX_BROKEN_DCS	BIT_ULL(42)
1629#define XHCI_SUSPEND_RESUME_CLKS	BIT_ULL(43)
1630#define XHCI_RESET_TO_DEFAULT	BIT_ULL(44)
1631#define XHCI_TRB_OVERFETCH	BIT_ULL(45)
1632#define XHCI_ZHAOXIN_HOST	BIT_ULL(46)
1633#define XHCI_WRITE_64_HI_LO	BIT_ULL(47)
1634#define XHCI_CDNS_SCTX_QUIRK	BIT_ULL(48)
1635#define XHCI_ETRON_HOST	BIT_ULL(49)
1636
1637	unsigned int		num_active_eps;
1638	unsigned int		limit_active_eps;
1639	struct xhci_port	*hw_ports;
1640	struct xhci_hub		usb2_rhub;
1641	struct xhci_hub		usb3_rhub;
1642	/* support xHCI 1.0 spec USB2 hardware LPM */
1643	unsigned		hw_lpm_support:1;
1644	/* Broken Suspend flag for SNPS Suspend resume issue */
1645	unsigned		broken_suspend:1;
 1646	/* Indicates that an hcd may be omitted if its root hub has no ports */
1647	unsigned		allow_single_roothub:1;
1648	/* cached extended protocol port capabilities */
1649	struct xhci_port_cap	*port_caps;
1650	unsigned int		num_port_caps;
1651	/* Compliance Mode Recovery Data */
1652	struct timer_list	comp_mode_recovery_timer;
1653	u32			port_status_u0;
1654	u16			test_mode;
1655/* Compliance Mode Timer Triggered every 2 seconds */
1656#define COMP_MODE_RCVRY_MSECS 2000
1657
1658	struct dentry		*debugfs_root;
1659	struct dentry		*debugfs_slots;
1660	struct list_head	regset_list;
1661
1662	void			*dbc;
1663	/* platform-specific data -- must come last */
1664	unsigned long		priv[] __aligned(sizeof(s64));
1665};
1666
1667/* Platform specific overrides to generic XHCI hc_driver ops */
1668struct xhci_driver_overrides {
1669	size_t extra_priv_size;
1670	int (*reset)(struct usb_hcd *hcd);
1671	int (*start)(struct usb_hcd *hcd);
1672	int (*add_endpoint)(struct usb_hcd *hcd, struct usb_device *udev,
1673			    struct usb_host_endpoint *ep);
1674	int (*drop_endpoint)(struct usb_hcd *hcd, struct usb_device *udev,
1675			     struct usb_host_endpoint *ep);
1676	int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
1677	void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
1678	int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
1679			    struct usb_tt *tt, gfp_t mem_flags);
1680	int (*hub_control)(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1681			   u16 wIndex, char *buf, u16 wLength);
1682};
1683
1684#define	XHCI_CFC_DELAY		10
1685
 1686/* convert between an HCD pointer and the corresponding xhci_hcd */
1687static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
1688{
1689	struct usb_hcd *primary_hcd;
1690
1691	if (usb_hcd_is_primary_hcd(hcd))
1692		primary_hcd = hcd;
1693	else
1694		primary_hcd = hcd->primary_hcd;
1695
1696	return (struct xhci_hcd *) (primary_hcd->hcd_priv);
1697}
1698
1699static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
1700{
1701	return xhci->main_hcd;
1702}
1703
1704static inline struct usb_hcd *xhci_get_usb3_hcd(struct xhci_hcd *xhci)
1705{
1706	if (xhci->shared_hcd)
1707		return xhci->shared_hcd;
1708
1709	if (!xhci->usb2_rhub.num_ports)
1710		return xhci->main_hcd;
1711
1712	return NULL;
1713}
1714
1715static inline bool xhci_hcd_is_usb3(struct usb_hcd *hcd)
1716{
1717	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1718
1719	return hcd == xhci_get_usb3_hcd(xhci);
1720}
1721
1722static inline bool xhci_has_one_roothub(struct xhci_hcd *xhci)
1723{
1724	return xhci->allow_single_roothub &&
1725	       (!xhci->usb2_rhub.num_ports || !xhci->usb3_rhub.num_ports);
1726}
1727
1728#define xhci_dbg(xhci, fmt, args...) \
1729	dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1730#define xhci_err(xhci, fmt, args...) \
1731	dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1732#define xhci_warn(xhci, fmt, args...) \
1733	dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1734#define xhci_info(xhci, fmt, args...) \
1735	dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
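/*
 * Usage sketch for the logging wrappers above (hypothetical function with a
 * made-up message): output is routed to the controller's struct device.
 */
static inline void xhci_example_log_version(struct xhci_hcd *xhci)
{
	xhci_info(xhci, "xHCI host, HCI version 0x%x\n", xhci->hci_version);
}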
1736
1737/*
1738 * Registers should always be accessed with double word or quad word accesses.
1739 *
1740 * Some xHCI implementations may support 64-bit address pointers.  Registers
1741 * with 64-bit address pointers should be written to with dword accesses by
1742 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
1743 * xHCI implementations that do not support 64-bit address pointers will ignore
1744 * the high dword, and write order is irrelevant.
1745 */
1746static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
1747		__le64 __iomem *regs)
1748{
1749	return lo_hi_readq(regs);
1750}
1751static inline void xhci_write_64(struct xhci_hcd *xhci,
1752				 const u64 val, __le64 __iomem *regs)
1753{
1754	lo_hi_writeq(val, regs);
1755}
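/*
 * Open-coded equivalent of the 64-bit write described above, shown only for
 * illustration (hypothetical helper; assumes writel() from <linux/io.h>):
 * the low dword is written first, then the high dword, which matches what
 * lo_hi_writeq() does internally.
 */
static inline void xhci_example_write_64(const u64 val, __le64 __iomem *regs)
{
	__u32 __iomem *ptr = (__u32 __iomem *) regs;

	writel((u32) val, ptr);			/* low dword first */
	writel((u32) (val >> 32), ptr + 1);	/* then high dword */
}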
1756
1757
1758/* Link TRB chain should always be set on 0.95 hosts, and AMD 0.96 ISOC rings */
1759static inline bool xhci_link_chain_quirk(struct xhci_hcd *xhci, enum xhci_ring_type type)
1760{
1761	return (xhci->quirks & XHCI_LINK_TRB_QUIRK) ||
1762	       (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST));
1763}
1764
1765/* xHCI debugging */
1766char *xhci_get_slot_state(struct xhci_hcd *xhci,
1767		struct xhci_container_ctx *ctx);
1768void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
1769			const char *fmt, ...);
1770
1771/* xHCI memory management */
1772void xhci_mem_cleanup(struct xhci_hcd *xhci);
1773int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
1774void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
1775int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
1776int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
1777void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1778		struct usb_device *udev);
1779unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
1780unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
1781void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
1782void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
1783		struct xhci_virt_device *virt_dev,
1784		int old_active_eps);
1785void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info);
1786void xhci_update_bw_info(struct xhci_hcd *xhci,
1787		struct xhci_container_ctx *in_ctx,
1788		struct xhci_input_control_ctx *ctrl_ctx,
1789		struct xhci_virt_device *virt_dev);
1790void xhci_endpoint_copy(struct xhci_hcd *xhci,
1791		struct xhci_container_ctx *in_ctx,
1792		struct xhci_container_ctx *out_ctx,
1793		unsigned int ep_index);
1794void xhci_slot_copy(struct xhci_hcd *xhci,
1795		struct xhci_container_ctx *in_ctx,
1796		struct xhci_container_ctx *out_ctx);
1797int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
1798		struct usb_device *udev, struct usb_host_endpoint *ep,
1799		gfp_t mem_flags);
1800struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
1801		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
1802void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
1803int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
1804		unsigned int num_trbs, gfp_t flags);
1805void xhci_initialize_ring_info(struct xhci_ring *ring);
1806void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
1807		struct xhci_virt_device *virt_dev,
1808		unsigned int ep_index);
1809struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
1810		unsigned int num_stream_ctxs,
1811		unsigned int num_streams,
1812		unsigned int max_packet, gfp_t flags);
1813void xhci_free_stream_info(struct xhci_hcd *xhci,
1814		struct xhci_stream_info *stream_info);
1815void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
1816		struct xhci_ep_ctx *ep_ctx,
1817		struct xhci_stream_info *stream_info);
1818void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
1819		struct xhci_virt_ep *ep);
1820void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
1821	struct xhci_virt_device *virt_dev, bool drop_control_ep);
1822struct xhci_ring *xhci_dma_to_transfer_ring(
1823		struct xhci_virt_ep *ep,
1824		u64 address);
1825struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1826		bool allocate_completion, gfp_t mem_flags);
1827struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
1828		bool allocate_completion, gfp_t mem_flags);
1829void xhci_urb_free_priv(struct urb_priv *urb_priv);
1830void xhci_free_command(struct xhci_hcd *xhci,
1831		struct xhci_command *command);
1832struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
1833		int type, gfp_t flags);
1834void xhci_free_container_ctx(struct xhci_hcd *xhci,
1835		struct xhci_container_ctx *ctx);
1836struct xhci_interrupter *
1837xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
1838				  u32 imod_interval);
1839void xhci_remove_secondary_interrupter(struct usb_hcd
1840				       *hcd, struct xhci_interrupter *ir);
1841
1842/* xHCI host controller glue */
1843typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
1844int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
1845int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
1846		u32 mask, u32 done, int usec, unsigned int exit_state);
1847void xhci_quiesce(struct xhci_hcd *xhci);
1848int xhci_halt(struct xhci_hcd *xhci);
1849int xhci_start(struct xhci_hcd *xhci);
1850int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us);
1851int xhci_run(struct usb_hcd *hcd);
1852int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
1853void xhci_shutdown(struct usb_hcd *hcd);
1854void xhci_stop(struct usb_hcd *hcd);
1855void xhci_init_driver(struct hc_driver *drv,
1856		      const struct xhci_driver_overrides *over);
1857int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1858		      struct usb_host_endpoint *ep);
1859int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1860		       struct usb_host_endpoint *ep);
1861int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1862void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1863int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
1864			   struct usb_tt *tt, gfp_t mem_flags);
1865int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
1866int xhci_ext_cap_init(struct xhci_hcd *xhci);
1867
1868int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
1869int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg);
1870
 
1871irqreturn_t xhci_irq(struct usb_hcd *hcd);
1872irqreturn_t xhci_msi_irq(int irq, void *hcd);
1873int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
 
1874int xhci_alloc_tt_info(struct xhci_hcd *xhci,
1875		struct xhci_virt_device *virt_dev,
1876		struct usb_device *hdev,
1877		struct usb_tt *tt, gfp_t mem_flags);
1878int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
1879				    u32 imod_interval);
1880
1881/* xHCI ring, segment, TRB, and TD functions */
1882dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
1883struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td,
1884			       dma_addr_t suspect_dma, bool debug);
 
1885int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
1886void xhci_ring_cmd_db(struct xhci_hcd *xhci);
1887int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
1888		u32 trb_type, u32 slot_id);
1889int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
1890		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev);
1891int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
1892		u32 field1, u32 field2, u32 field3, u32 field4);
1893int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
1894		int slot_id, unsigned int ep_index, int suspend);
1895int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1896		int slot_id, unsigned int ep_index);
1897int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1898		int slot_id, unsigned int ep_index);
1899int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1900		int slot_id, unsigned int ep_index);
1901int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
1902		struct urb *urb, int slot_id, unsigned int ep_index);
1903int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
1904		struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id,
1905		bool command_must_succeed);
1906int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
1907		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
1908int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
1909		int slot_id, unsigned int ep_index,
1910		enum xhci_ep_reset_type reset_type);
1911int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
1912		u32 slot_id);
1913void xhci_handle_command_timeout(struct work_struct *work);
1914
1915void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
1916		unsigned int ep_index, unsigned int stream_id);
1917void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
1918		unsigned int slot_id,
1919		unsigned int ep_index);
1920void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
1921void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
1922unsigned int count_trbs(u64 addr, u64 len);
1923int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
1924			    int suspend, gfp_t gfp_flags);
1925void xhci_process_cancelled_tds(struct xhci_virt_ep *ep);
1926
1927/* xHCI roothub code */
1928void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
1929				u32 link_state);
1930void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port,
1931				u32 port_bit);
1932int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
1933		char *buf, u16 wLength);
1934int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
1935int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
1936struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
1937enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
1938						struct xhci_port *port);
1939void xhci_hc_died(struct xhci_hcd *xhci);
1940
1941#ifdef CONFIG_PM
1942int xhci_bus_suspend(struct usb_hcd *hcd);
1943int xhci_bus_resume(struct usb_hcd *hcd);
1944unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd);
1945#else
1946#define	xhci_bus_suspend	NULL
1947#define	xhci_bus_resume		NULL
1948#define	xhci_get_resuming_ports	NULL
1949#endif	/* CONFIG_PM */
1950
1951u32 xhci_port_state_to_neutral(u32 state);
1952void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
1953
1954/* xHCI contexts */
1955struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_container_ctx *ctx);
1956struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
1957struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
1958
1959struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
1960		unsigned int slot_id, unsigned int ep_index,
1961		unsigned int stream_id);
1962
1963static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
1964								struct urb *urb)
1965{
1966	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
1967					xhci_get_endpoint_index(&urb->ep->desc),
1968					urb->stream_id);
1969}
1970
1971/*
 1972 * TODO: As per the spec, Isochronous IDT transmissions are supported. We bypass
 1973 * them anyway, as we were unable to find a device that matches the
 1974 * constraints.
1975 */
1976static inline bool xhci_urb_suitable_for_idt(struct urb *urb)
1977{
1978	if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) &&
1979	    usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE &&
1980	    urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE &&
1981	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) &&
1982	    !urb->num_sgs)
1983		return true;
1984
1985	return false;
1986}
1987
1988static inline char *xhci_slot_state_string(u32 state)
1989{
1990	switch (state) {
1991	case SLOT_STATE_ENABLED:
1992		return "enabled/disabled";
1993	case SLOT_STATE_DEFAULT:
1994		return "default";
1995	case SLOT_STATE_ADDRESSED:
1996		return "addressed";
1997	case SLOT_STATE_CONFIGURED:
1998		return "configured";
1999	default:
2000		return "reserved";
2001	}
2002}
2003
2004static inline const char *xhci_decode_trb(char *str, size_t size,
2005					  u32 field0, u32 field1, u32 field2, u32 field3)
2006{
2007	int type = TRB_FIELD_TO_TYPE(field3);
2008
2009	switch (type) {
2010	case TRB_LINK:
2011		snprintf(str, size,
2012			"LINK %08x%08x intr %d type '%s' flags %c:%c:%c:%c",
2013			field1, field0, GET_INTR_TARGET(field2),
2014			xhci_trb_type_string(type),
2015			field3 & TRB_IOC ? 'I' : 'i',
2016			field3 & TRB_CHAIN ? 'C' : 'c',
2017			field3 & TRB_TC ? 'T' : 't',
2018			field3 & TRB_CYCLE ? 'C' : 'c');
2019		break;
2020	case TRB_TRANSFER:
2021	case TRB_COMPLETION:
2022	case TRB_PORT_STATUS:
2023	case TRB_BANDWIDTH_EVENT:
2024	case TRB_DOORBELL:
2025	case TRB_HC_EVENT:
2026	case TRB_DEV_NOTE:
2027	case TRB_MFINDEX_WRAP:
2028		snprintf(str, size,
2029			"TRB %08x%08x status '%s' len %d slot %d ep %d type '%s' flags %c:%c",
2030			field1, field0,
2031			xhci_trb_comp_code_string(GET_COMP_CODE(field2)),
2032			EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
2033			TRB_TO_EP_ID(field3),
2034			xhci_trb_type_string(type),
2035			field3 & EVENT_DATA ? 'E' : 'e',
2036			field3 & TRB_CYCLE ? 'C' : 'c');
2037
2038		break;
2039	case TRB_SETUP:
2040		snprintf(str, size,
2041			"bRequestType %02x bRequest %02x wValue %02x%02x wIndex %02x%02x wLength %d length %d TD size %d intr %d type '%s' flags %c:%c:%c",
2042				field0 & 0xff,
2043				(field0 & 0xff00) >> 8,
2044				(field0 & 0xff000000) >> 24,
2045				(field0 & 0xff0000) >> 16,
2046				(field1 & 0xff00) >> 8,
2047				field1 & 0xff,
2048				(field1 & 0xff000000) >> 16 |
2049				(field1 & 0xff0000) >> 16,
2050				TRB_LEN(field2), GET_TD_SIZE(field2),
2051				GET_INTR_TARGET(field2),
2052				xhci_trb_type_string(type),
2053				field3 & TRB_IDT ? 'I' : 'i',
2054				field3 & TRB_IOC ? 'I' : 'i',
2055				field3 & TRB_CYCLE ? 'C' : 'c');
2056		break;
2057	case TRB_DATA:
2058		snprintf(str, size,
2059			 "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c",
2060				field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
2061				GET_INTR_TARGET(field2),
2062				xhci_trb_type_string(type),
2063				field3 & TRB_IDT ? 'I' : 'i',
2064				field3 & TRB_IOC ? 'I' : 'i',
2065				field3 & TRB_CHAIN ? 'C' : 'c',
2066				field3 & TRB_NO_SNOOP ? 'S' : 's',
2067				field3 & TRB_ISP ? 'I' : 'i',
2068				field3 & TRB_ENT ? 'E' : 'e',
2069				field3 & TRB_CYCLE ? 'C' : 'c');
2070		break;
2071	case TRB_STATUS:
2072		snprintf(str, size,
2073			 "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c",
2074				field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
2075				GET_INTR_TARGET(field2),
2076				xhci_trb_type_string(type),
2077				field3 & TRB_IOC ? 'I' : 'i',
2078				field3 & TRB_CHAIN ? 'C' : 'c',
2079				field3 & TRB_ENT ? 'E' : 'e',
2080				field3 & TRB_CYCLE ? 'C' : 'c');
2081		break;
2082	case TRB_NORMAL:
2083	case TRB_EVENT_DATA:
2084	case TRB_TR_NOOP:
2085		snprintf(str, size,
2086			"Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c:%c",
2087			field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
2088			GET_INTR_TARGET(field2),
2089			xhci_trb_type_string(type),
2090			field3 & TRB_BEI ? 'B' : 'b',
2091			field3 & TRB_IDT ? 'I' : 'i',
2092			field3 & TRB_IOC ? 'I' : 'i',
2093			field3 & TRB_CHAIN ? 'C' : 'c',
2094			field3 & TRB_NO_SNOOP ? 'S' : 's',
2095			field3 & TRB_ISP ? 'I' : 'i',
2096			field3 & TRB_ENT ? 'E' : 'e',
2097			field3 & TRB_CYCLE ? 'C' : 'c');
2098		break;
2099	case TRB_ISOC:
2100		snprintf(str, size,
2101			"Buffer %08x%08x length %d TD size/TBC %d intr %d type '%s' TBC %u TLBPC %u frame_id %u flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
2102			field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
2103			GET_INTR_TARGET(field2),
2104			xhci_trb_type_string(type),
2105			GET_TBC(field3),
2106			GET_TLBPC(field3),
2107			GET_FRAME_ID(field3),
2108			field3 & TRB_SIA ? 'S' : 's',
2109			field3 & TRB_BEI ? 'B' : 'b',
2110			field3 & TRB_IDT ? 'I' : 'i',
2111			field3 & TRB_IOC ? 'I' : 'i',
2112			field3 & TRB_CHAIN ? 'C' : 'c',
2113			field3 & TRB_NO_SNOOP ? 'S' : 's',
2114			field3 & TRB_ISP ? 'I' : 'i',
2115			field3 & TRB_ENT ? 'E' : 'e',
2116			field3 & TRB_CYCLE ? 'C' : 'c');
2117		break;
2118	case TRB_CMD_NOOP:
2119	case TRB_ENABLE_SLOT:
2120		snprintf(str, size,
2121			"%s: flags %c",
2122			xhci_trb_type_string(type),
2123			field3 & TRB_CYCLE ? 'C' : 'c');
2124		break;
2125	case TRB_DISABLE_SLOT:
2126	case TRB_NEG_BANDWIDTH:
2127		snprintf(str, size,
2128			"%s: slot %d flags %c",
2129			xhci_trb_type_string(type),
2130			TRB_TO_SLOT_ID(field3),
2131			field3 & TRB_CYCLE ? 'C' : 'c');
2132		break;
2133	case TRB_ADDR_DEV:
2134		snprintf(str, size,
2135			"%s: ctx %08x%08x slot %d flags %c:%c",
2136			xhci_trb_type_string(type),
2137			field1, field0,
2138			TRB_TO_SLOT_ID(field3),
2139			field3 & TRB_BSR ? 'B' : 'b',
2140			field3 & TRB_CYCLE ? 'C' : 'c');
2141		break;
2142	case TRB_CONFIG_EP:
2143		snprintf(str, size,
2144			"%s: ctx %08x%08x slot %d flags %c:%c",
2145			xhci_trb_type_string(type),
2146			field1, field0,
2147			TRB_TO_SLOT_ID(field3),
2148			field3 & TRB_DC ? 'D' : 'd',
2149			field3 & TRB_CYCLE ? 'C' : 'c');
2150		break;
2151	case TRB_EVAL_CONTEXT:
2152		snprintf(str, size,
2153			"%s: ctx %08x%08x slot %d flags %c",
2154			xhci_trb_type_string(type),
2155			field1, field0,
2156			TRB_TO_SLOT_ID(field3),
2157			field3 & TRB_CYCLE ? 'C' : 'c');
2158		break;
2159	case TRB_RESET_EP:
2160		snprintf(str, size,
2161			"%s: ctx %08x%08x slot %d ep %d flags %c:%c",
2162			xhci_trb_type_string(type),
2163			field1, field0,
2164			TRB_TO_SLOT_ID(field3),
2165			TRB_TO_EP_ID(field3),
2166			field3 & TRB_TSP ? 'T' : 't',
2167			field3 & TRB_CYCLE ? 'C' : 'c');
2168		break;
2169	case TRB_STOP_RING:
2170		snprintf(str, size,
2171			"%s: slot %d sp %d ep %d flags %c",
2172			xhci_trb_type_string(type),
2173			TRB_TO_SLOT_ID(field3),
2174			TRB_TO_SUSPEND_PORT(field3),
2175			TRB_TO_EP_ID(field3),
2176			field3 & TRB_CYCLE ? 'C' : 'c');
2177		break;
2178	case TRB_SET_DEQ:
2179		snprintf(str, size,
2180			"%s: deq %08x%08x stream %d slot %d ep %d flags %c",
2181			xhci_trb_type_string(type),
2182			field1, field0,
2183			TRB_TO_STREAM_ID(field2),
2184			TRB_TO_SLOT_ID(field3),
2185			TRB_TO_EP_ID(field3),
2186			field3 & TRB_CYCLE ? 'C' : 'c');
2187		break;
2188	case TRB_RESET_DEV:
2189		snprintf(str, size,
2190			"%s: slot %d flags %c",
2191			xhci_trb_type_string(type),
2192			TRB_TO_SLOT_ID(field3),
2193			field3 & TRB_CYCLE ? 'C' : 'c');
2194		break;
2195	case TRB_FORCE_EVENT:
2196		snprintf(str, size,
2197			"%s: event %08x%08x vf intr %d vf id %d flags %c",
2198			xhci_trb_type_string(type),
2199			field1, field0,
2200			TRB_TO_VF_INTR_TARGET(field2),
2201			TRB_TO_VF_ID(field3),
2202			field3 & TRB_CYCLE ? 'C' : 'c');
2203		break;
2204	case TRB_SET_LT:
2205		snprintf(str, size,
2206			"%s: belt %d flags %c",
2207			xhci_trb_type_string(type),
2208			TRB_TO_BELT(field3),
2209			field3 & TRB_CYCLE ? 'C' : 'c');
2210		break;
2211	case TRB_GET_BW:
2212		snprintf(str, size,
2213			"%s: ctx %08x%08x slot %d speed %d flags %c",
2214			xhci_trb_type_string(type),
2215			field1, field0,
2216			TRB_TO_SLOT_ID(field3),
2217			TRB_TO_DEV_SPEED(field3),
2218			field3 & TRB_CYCLE ? 'C' : 'c');
2219		break;
2220	case TRB_FORCE_HEADER:
2221		snprintf(str, size,
2222			"%s: info %08x%08x%08x pkt type %d roothub port %d flags %c",
2223			xhci_trb_type_string(type),
2224			field2, field1, field0 & 0xffffffe0,
2225			TRB_TO_PACKET_TYPE(field0),
2226			TRB_TO_ROOTHUB_PORT(field3),
2227			field3 & TRB_CYCLE ? 'C' : 'c');
2228		break;
2229	default:
2230		snprintf(str, size,
2231			"type '%s' -> raw %08x %08x %08x %08x",
2232			xhci_trb_type_string(type),
2233			field0, field1, field2, field3);
2234	}
2235
2236	return str;
2237}
2238
2239static inline const char *xhci_decode_ctrl_ctx(char *str,
2240		unsigned long drop, unsigned long add)
2241{
2242	unsigned int	bit;
2243	int		ret = 0;
2244
2245	str[0] = '\0';
2246
2247	if (drop) {
2248		ret = sprintf(str, "Drop:");
2249		for_each_set_bit(bit, &drop, 32)
2250			ret += sprintf(str + ret, " %d%s",
2251				       bit / 2,
2252				       bit % 2 ? "in":"out");
2253		ret += sprintf(str + ret, ", ");
2254	}
2255
2256	if (add) {
2257		ret += sprintf(str + ret, "Add:%s%s",
2258			       (add & SLOT_FLAG) ? " slot":"",
2259			       (add & EP0_FLAG) ? " ep0":"");
2260		add &= ~(SLOT_FLAG | EP0_FLAG);
2261		for_each_set_bit(bit, &add, 32)
2262			ret += sprintf(str + ret, " %d%s",
2263				       bit / 2,
2264				       bit % 2 ? "in":"out");
2265	}
2266	return str;
2267}
2268
2269static inline const char *xhci_decode_slot_context(char *str,
2270		u32 info, u32 info2, u32 tt_info, u32 state)
2271{
2272	u32 speed;
2273	u32 hub;
2274	u32 mtt;
2275	int ret = 0;
2276
2277	speed = info & DEV_SPEED;
2278	hub = info & DEV_HUB;
2279	mtt = info & DEV_MTT;
2280
2281	ret = sprintf(str, "RS %05x %s%s%s Ctx Entries %d MEL %d us Port# %d/%d",
2282			info & ROUTE_STRING_MASK,
2283			({ char *s;
2284			switch (speed) {
2285			case SLOT_SPEED_FS:
2286				s = "full-speed";
2287				break;
2288			case SLOT_SPEED_LS:
2289				s = "low-speed";
2290				break;
2291			case SLOT_SPEED_HS:
2292				s = "high-speed";
2293				break;
2294			case SLOT_SPEED_SS:
2295				s = "super-speed";
2296				break;
2297			case SLOT_SPEED_SSP:
2298				s = "super-speed plus";
2299				break;
2300			default:
2301				s = "UNKNOWN speed";
2302			} s; }),
2303			mtt ? " multi-TT" : "",
2304			hub ? " Hub" : "",
2305			(info & LAST_CTX_MASK) >> 27,
2306			info2 & MAX_EXIT,
2307			DEVINFO_TO_ROOT_HUB_PORT(info2),
2308			DEVINFO_TO_MAX_PORTS(info2));
2309
2310	ret += sprintf(str + ret, " [TT Slot %d Port# %d TTT %d Intr %d] Addr %d State %s",
2311			tt_info & TT_SLOT, (tt_info & TT_PORT) >> 8,
2312			GET_TT_THINK_TIME(tt_info), GET_INTR_TARGET(tt_info),
2313			state & DEV_ADDR_MASK,
2314			xhci_slot_state_string(GET_SLOT_STATE(state)));
2315
2316	return str;
2317}
2318
2319
2320static inline const char *xhci_portsc_link_state_string(u32 portsc)
2321{
2322	switch (portsc & PORT_PLS_MASK) {
2323	case XDEV_U0:
2324		return "U0";
2325	case XDEV_U1:
2326		return "U1";
2327	case XDEV_U2:
2328		return "U2";
2329	case XDEV_U3:
2330		return "U3";
2331	case XDEV_DISABLED:
2332		return "Disabled";
2333	case XDEV_RXDETECT:
2334		return "RxDetect";
2335	case XDEV_INACTIVE:
2336		return "Inactive";
2337	case XDEV_POLLING:
2338		return "Polling";
2339	case XDEV_RECOVERY:
2340		return "Recovery";
2341	case XDEV_HOT_RESET:
2342		return "Hot Reset";
2343	case XDEV_COMP_MODE:
2344		return "Compliance mode";
2345	case XDEV_TEST_MODE:
2346		return "Test mode";
2347	case XDEV_RESUME:
2348		return "Resume";
2349	default:
2350		break;
2351	}
2352	return "Unknown";
2353}
2354
2355static inline const char *xhci_decode_portsc(char *str, u32 portsc)
2356{
2357	int ret;
2358
2359	ret = sprintf(str, "0x%08x ", portsc);
2360
2361	if (portsc == ~(u32)0)
2362		return str;
2363
2364	ret += sprintf(str + ret, "%s %s %s Link:%s PortSpeed:%d ",
2365		      portsc & PORT_POWER	? "Powered" : "Powered-off",
2366		      portsc & PORT_CONNECT	? "Connected" : "Not-connected",
2367		      portsc & PORT_PE		? "Enabled" : "Disabled",
2368		      xhci_portsc_link_state_string(portsc),
2369		      DEV_PORT_SPEED(portsc));
2370
2371	if (portsc & PORT_OC)
2372		ret += sprintf(str + ret, "OverCurrent ");
2373	if (portsc & PORT_RESET)
2374		ret += sprintf(str + ret, "In-Reset ");
2375
2376	ret += sprintf(str + ret, "Change: ");
2377	if (portsc & PORT_CSC)
2378		ret += sprintf(str + ret, "CSC ");
2379	if (portsc & PORT_PEC)
2380		ret += sprintf(str + ret, "PEC ");
2381	if (portsc & PORT_WRC)
2382		ret += sprintf(str + ret, "WRC ");
2383	if (portsc & PORT_OCC)
2384		ret += sprintf(str + ret, "OCC ");
2385	if (portsc & PORT_RC)
2386		ret += sprintf(str + ret, "PRC ");
2387	if (portsc & PORT_PLC)
2388		ret += sprintf(str + ret, "PLC ");
2389	if (portsc & PORT_CEC)
2390		ret += sprintf(str + ret, "CEC ");
2391	if (portsc & PORT_CAS)
2392		ret += sprintf(str + ret, "CAS ");
2393
2394	ret += sprintf(str + ret, "Wake: ");
2395	if (portsc & PORT_WKCONN_E)
2396		ret += sprintf(str + ret, "WCE ");
2397	if (portsc & PORT_WKDISC_E)
2398		ret += sprintf(str + ret, "WDE ");
2399	if (portsc & PORT_WKOC_E)
2400		ret += sprintf(str + ret, "WOE ");
2401
2402	return str;
2403}
2404
2405static inline const char *xhci_decode_usbsts(char *str, u32 usbsts)
2406{
2407	int ret = 0;
2408
2409	ret = sprintf(str, " 0x%08x", usbsts);
2410
2411	if (usbsts == ~(u32)0)
2412		return str;
2413
2414	if (usbsts & STS_HALT)
2415		ret += sprintf(str + ret, " HCHalted");
2416	if (usbsts & STS_FATAL)
2417		ret += sprintf(str + ret, " HSE");
2418	if (usbsts & STS_EINT)
2419		ret += sprintf(str + ret, " EINT");
2420	if (usbsts & STS_PORT)
2421		ret += sprintf(str + ret, " PCD");
2422	if (usbsts & STS_SAVE)
2423		ret += sprintf(str + ret, " SSS");
2424	if (usbsts & STS_RESTORE)
2425		ret += sprintf(str + ret, " RSS");
2426	if (usbsts & STS_SRE)
2427		ret += sprintf(str + ret, " SRE");
2428	if (usbsts & STS_CNR)
2429		ret += sprintf(str + ret, " CNR");
2430	if (usbsts & STS_HCE)
2431		ret += sprintf(str + ret, " HCE");
2432
2433	return str;
2434}
2435
2436static inline const char *xhci_decode_doorbell(char *str, u32 slot, u32 doorbell)
2437{
2438	u8 ep;
2439	u16 stream;
2440	int ret;
2441
2442	ep = (doorbell & 0xff);
2443	stream = doorbell >> 16;
2444
2445	if (slot == 0) {
2446		sprintf(str, "Command Ring %d", doorbell);
2447		return str;
2448	}
2449	ret = sprintf(str, "Slot %d ", slot);
2450	if (ep > 0 && ep < 32)
 2451		ret += sprintf(str + ret, "ep%d%s",
 2452			      ep / 2,
 2453			      ep % 2 ? "in" : "out");
 2454	else if (ep == 0 || ep < 248)
 2455		ret += sprintf(str + ret, "Reserved %d", ep);
 2456	else
 2457		ret += sprintf(str + ret, "Vendor Defined %d", ep);
2458	if (stream)
2459		ret = sprintf(str + ret, " Stream %d", stream);
2460
2461	return str;
2462}
2463
2464static inline const char *xhci_ep_state_string(u8 state)
2465{
2466	switch (state) {
2467	case EP_STATE_DISABLED:
2468		return "disabled";
2469	case EP_STATE_RUNNING:
2470		return "running";
2471	case EP_STATE_HALTED:
2472		return "halted";
2473	case EP_STATE_STOPPED:
2474		return "stopped";
2475	case EP_STATE_ERROR:
2476		return "error";
2477	default:
2478		return "INVALID";
2479	}
2480}
2481
2482static inline const char *xhci_ep_type_string(u8 type)
2483{
2484	switch (type) {
2485	case ISOC_OUT_EP:
2486		return "Isoc OUT";
2487	case BULK_OUT_EP:
2488		return "Bulk OUT";
2489	case INT_OUT_EP:
2490		return "Int OUT";
2491	case CTRL_EP:
2492		return "Ctrl";
2493	case ISOC_IN_EP:
2494		return "Isoc IN";
2495	case BULK_IN_EP:
2496		return "Bulk IN";
2497	case INT_IN_EP:
2498		return "Int IN";
2499	default:
2500		return "INVALID";
2501	}
2502}
2503
2504static inline const char *xhci_decode_ep_context(char *str, u32 info,
2505		u32 info2, u64 deq, u32 tx_info)
2506{
2507	int ret;
2508
2509	u32 esit;
2510	u16 maxp;
2511	u16 avg;
2512
2513	u8 max_pstr;
2514	u8 ep_state;
2515	u8 interval;
2516	u8 ep_type;
2517	u8 burst;
2518	u8 cerr;
2519	u8 mult;
2520
2521	bool lsa;
2522	bool hid;
2523
2524	esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
2525		CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
2526
2527	ep_state = info & EP_STATE_MASK;
2528	max_pstr = CTX_TO_EP_MAXPSTREAMS(info);
2529	interval = CTX_TO_EP_INTERVAL(info);
2530	mult = CTX_TO_EP_MULT(info) + 1;
2531	lsa = !!(info & EP_HAS_LSA);
2532
2533	cerr = (info2 & (3 << 1)) >> 1;
2534	ep_type = CTX_TO_EP_TYPE(info2);
2535	hid = !!(info2 & (1 << 7));
2536	burst = CTX_TO_MAX_BURST(info2);
2537	maxp = MAX_PACKET_DECODED(info2);
2538
2539	avg = EP_AVG_TRB_LENGTH(tx_info);
2540
2541	ret = sprintf(str, "State %s mult %d max P. Streams %d %s",
2542			xhci_ep_state_string(ep_state), mult,
2543			max_pstr, lsa ? "LSA " : "");
2544
2545	ret += sprintf(str + ret, "interval %d us max ESIT payload %d CErr %d ",
2546			(1 << interval) * 125, esit, cerr);
2547
2548	ret += sprintf(str + ret, "Type %s %sburst %d maxp %d deq %016llx ",
2549			xhci_ep_type_string(ep_type), hid ? "HID" : "",
2550			burst, maxp, deq);
2551
2552	ret += sprintf(str + ret, "avg trb len %d", avg);
2553
2554	return str;
2555}
2556
2557#endif /* __LINUX_XHCI_HCD_H */